2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <net/vxlan.h>
34 #include <linux/microchipphy.h>
35 #include <linux/of_net.h>
/* Module identity strings. */
38 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
39 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
40 #define DRIVER_NAME "lan78xx"
41 #define DRIVER_VERSION "1.0.4"
/* Timeouts and RX queue memory cap for the USB data path. */
43 #define TX_TIMEOUT_JIFFIES (5 * HZ)
44 #define THROTTLE_JIFFIES (HZ / 8)
45 #define UNLINK_TIMEOUT_MS 3
47 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
/* Bulk endpoint max packet size per USB bus speed (Super/High/Full). */
49 #define SS_USB_PKT_SIZE (1024)
50 #define HS_USB_PKT_SIZE (512)
51 #define FS_USB_PKT_SIZE (64)
/* Device FIFO sizing and offload defaults. */
53 #define MAX_RX_FIFO_SIZE (12 * 1024)
54 #define MAX_TX_FIFO_SIZE (12 * 1024)
55 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
56 #define DEFAULT_BULK_IN_DELAY (0x0800)
57 #define MAX_SINGLE_PACKET_SIZE (9000)
58 #define DEFAULT_TX_CSUM_ENABLE (true)
59 #define DEFAULT_RX_CSUM_ENABLE (true)
60 #define DEFAULT_TSO_CSUM_ENABLE (true)
61 #define DEFAULT_VLAN_FILTER_ENABLE (true)
62 #define TX_OVERHEAD (8)
/* USB vendor/product IDs and magics used by the ethtool EEPROM/OTP paths. */
65 #define LAN78XX_USB_VENDOR_ID (0x0424)
66 #define LAN7800_USB_PRODUCT_ID (0x7800)
67 #define LAN7850_USB_PRODUCT_ID (0x7850)
68 #define LAN78XX_EEPROM_MAGIC (0x78A5)
69 #define LAN78XX_OTP_MAGIC (0x78F3)
70 #define AT29M2AF_USB_VENDOR_ID (0x07C9)
71 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
/* First-byte signatures identifying a programmed EEPROM or OTP image. */
76 #define EEPROM_INDICATOR (0xA5)
77 #define EEPROM_MAC_OFFSET (0x01)
78 #define MAX_EEPROM_SIZE 512
79 #define OTP_INDICATOR_1 (0xF3)
80 #define OTP_INDICATOR_2 (0xF7)
/* Union of all wake-on-LAN modes this driver advertises via ethtool. */
82 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
83 WAKE_MCAST | WAKE_BCAST | \
84 WAKE_ARP | WAKE_MAGIC)
86 /* USB related defines */
87 #define BULK_IN_PIPE 1
88 #define BULK_OUT_PIPE 2
90 /* default autosuspend delay (mSec)*/
91 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
93 /* statistic update interval (mSec) */
94 #define STAT_UPDATE_TIMER (1 * 1000)
/* Counter names reported by `ethtool -S`; order must track the u32 fields of
 * struct lan78xx_statstage one-for-one.
 * NOTE(review): the original numbering jumps in this excerpt — several entries
 * (e.g. FCS error and frame-count strings) appear to have been dropped by
 * extraction; verify against the full source before relying on the ordering.
 */
96 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
98 "RX Alignment Errors",
101 "RX Undersize Frame Errors",
102 "RX Oversize Frame Errors",
104 "RX Unicast Byte Count",
105 "RX Broadcast Byte Count",
106 "RX Multicast Byte Count",
108 "RX Broadcast Frames",
109 "RX Multicast Frames",
112 "RX 65 - 127 Byte Frames",
113 "RX 128 - 255 Byte Frames",
114 "RX 256 - 511 Bytes Frames",
115 "RX 512 - 1023 Byte Frames",
116 "RX 1024 - 1518 Byte Frames",
117 "RX Greater 1518 Byte Frames",
118 "EEE RX LPI Transitions",
121 "TX Excess Deferral Errors",
124 "TX Single Collisions",
125 "TX Multiple Collisions",
126 "TX Excessive Collision",
127 "TX Late Collisions",
128 "TX Unicast Byte Count",
129 "TX Broadcast Byte Count",
130 "TX Multicast Byte Count",
132 "TX Broadcast Frames",
133 "TX Multicast Frames",
136 "TX 65 - 127 Byte Frames",
137 "TX 128 - 255 Byte Frames",
138 "TX 256 - 511 Bytes Frames",
139 "TX 512 - 1023 Byte Frames",
140 "TX 1024 - 1518 Byte Frames",
141 "TX Greater 1518 Byte Frames",
142 "EEE TX LPI Transitions",
/* Raw 32-bit hardware statistics block, laid out exactly as returned by the
 * device's GET_STATS vendor request (see lan78xx_read_stats). Field order
 * matters: the code iterates over it as a flat u32 array.
 */
146 struct lan78xx_statstage {
148 u32 rx_alignment_errors;
149 u32 rx_fragment_errors;
150 u32 rx_jabber_errors;
151 u32 rx_undersize_frame_errors;
152 u32 rx_oversize_frame_errors;
153 u32 rx_dropped_frames;
154 u32 rx_unicast_byte_count;
155 u32 rx_broadcast_byte_count;
156 u32 rx_multicast_byte_count;
157 u32 rx_unicast_frames;
158 u32 rx_broadcast_frames;
159 u32 rx_multicast_frames;
161 u32 rx_64_byte_frames;
162 u32 rx_65_127_byte_frames;
163 u32 rx_128_255_byte_frames;
164 u32 rx_256_511_bytes_frames;
165 u32 rx_512_1023_byte_frames;
166 u32 rx_1024_1518_byte_frames;
167 u32 rx_greater_1518_byte_frames;
168 u32 eee_rx_lpi_transitions;
171 u32 tx_excess_deferral_errors;
172 u32 tx_carrier_errors;
173 u32 tx_bad_byte_count;
174 u32 tx_single_collisions;
175 u32 tx_multiple_collisions;
176 u32 tx_excessive_collision;
177 u32 tx_late_collisions;
178 u32 tx_unicast_byte_count;
179 u32 tx_broadcast_byte_count;
180 u32 tx_multicast_byte_count;
181 u32 tx_unicast_frames;
182 u32 tx_broadcast_frames;
183 u32 tx_multicast_frames;
185 u32 tx_64_byte_frames;
186 u32 tx_65_127_byte_frames;
187 u32 tx_128_255_byte_frames;
188 u32 tx_256_511_bytes_frames;
189 u32 tx_512_1023_byte_frames;
190 u32 tx_1024_1518_byte_frames;
191 u32 tx_greater_1518_byte_frames;
192 u32 eee_tx_lpi_transitions;
/* 64-bit accumulated statistics. Mirrors struct lan78xx_statstage field for
 * field; lan78xx_update_stats() widens each u32 hardware counter into the
 * matching u64 here, compensating for 32-bit rollover.
 */
196 struct lan78xx_statstage64 {
198 u64 rx_alignment_errors;
199 u64 rx_fragment_errors;
200 u64 rx_jabber_errors;
201 u64 rx_undersize_frame_errors;
202 u64 rx_oversize_frame_errors;
203 u64 rx_dropped_frames;
204 u64 rx_unicast_byte_count;
205 u64 rx_broadcast_byte_count;
206 u64 rx_multicast_byte_count;
207 u64 rx_unicast_frames;
208 u64 rx_broadcast_frames;
209 u64 rx_multicast_frames;
211 u64 rx_64_byte_frames;
212 u64 rx_65_127_byte_frames;
213 u64 rx_128_255_byte_frames;
214 u64 rx_256_511_bytes_frames;
215 u64 rx_512_1023_byte_frames;
216 u64 rx_1024_1518_byte_frames;
217 u64 rx_greater_1518_byte_frames;
218 u64 eee_rx_lpi_transitions;
221 u64 tx_excess_deferral_errors;
222 u64 tx_carrier_errors;
223 u64 tx_bad_byte_count;
224 u64 tx_single_collisions;
225 u64 tx_multiple_collisions;
226 u64 tx_excessive_collision;
227 u64 tx_late_collisions;
228 u64 tx_unicast_byte_count;
229 u64 tx_broadcast_byte_count;
230 u64 tx_multicast_byte_count;
231 u64 tx_unicast_frames;
232 u64 tx_broadcast_frames;
233 u64 tx_multicast_frames;
235 u64 tx_64_byte_frames;
236 u64 tx_65_127_byte_frames;
237 u64 tx_128_255_byte_frames;
238 u64 tx_256_511_bytes_frames;
239 u64 tx_512_1023_byte_frames;
240 u64 tx_1024_1518_byte_frames;
241 u64 tx_greater_1518_byte_frames;
242 u64 eee_tx_lpi_transitions;
/* Per-device private data hung off dev->data[0]; holds the RX filter engine
 * (RFE) shadow state and the deferred-work items that flush it to hardware.
 */
248 struct lan78xx_priv {
249 struct lan78xx_net *dev;
251 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
252 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
253 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
254 struct mutex dataport_mutex; /* for dataport access */
255 spinlock_t rfe_ctl_lock; /* for rfe register access */
256 struct work_struct set_multicast;
257 struct work_struct set_vlan;
/* Per-URB bookkeeping stored in skb->cb for queued TX/RX skbs. */
271 struct skb_data { /* skb->cb is one of these */
273 struct lan78xx_net *dev;
274 enum skb_state state;
/* NOTE(review): the two fields below belong to a struct whose opening line
 * was lost in this excerpt (presumably the deferred-control-URB context);
 * confirm against the full source.
 */
280 struct usb_ctrlrequest req;
281 struct lan78xx_net *dev;
/* Bit numbers for dev->flags, serviced by the deferred kevent worker. */
284 #define EVENT_TX_HALT 0
285 #define EVENT_RX_HALT 1
286 #define EVENT_RX_MEMORY 2
287 #define EVENT_STS_SPLIT 3
288 #define EVENT_LINK_RESET 4
289 #define EVENT_RX_PAUSED 5
290 #define EVENT_DEV_WAKING 6
291 #define EVENT_DEV_ASLEEP 7
292 #define EVENT_DEV_OPEN 8
293 #define EVENT_STAT_UPDATE 9
/* Statistics aggregation state (fields of the stats container struct whose
 * header line is missing from this excerpt): last raw snapshot, rollover
 * counts/limits, and the running 64-bit totals, all under access_lock.
 */
296 struct mutex access_lock; /* for stats access */
297 struct lan78xx_statstage saved;
298 struct lan78xx_statstage rollover_count;
299 struct lan78xx_statstage rollover_max;
300 struct lan78xx_statstage64 curr_stat;
/* Main per-device state (struct lan78xx_net; opening line missing here). */
304 struct net_device *net;
305 struct usb_device *udev;
306 struct usb_interface *intf;
/* skb queues for the RX/TX pipeline: in-flight, completed, and pending. */
311 struct sk_buff_head rxq;
312 struct sk_buff_head txq;
313 struct sk_buff_head done;
314 struct sk_buff_head rxq_pause;
315 struct sk_buff_head txq_pend;
317 struct tasklet_struct bh;
318 struct delayed_work wq;
322 struct urb *urb_intr;
323 struct usb_anchor deferred;
325 struct mutex phy_mutex; /* for phy access */
326 unsigned pipe_in, pipe_out, pipe_intr;
328 u32 hard_mtu; /* count any extra framing */
329 size_t rx_urb_size; /* size for rx urbs */
333 wait_queue_head_t *wait;
334 unsigned char suspend_count;
/* delay: generic deferral timer; stat_monitor: periodic stats refresh. */
337 struct timer_list delay;
338 struct timer_list stat_monitor;
340 unsigned long data[5];
347 struct mii_bus *mdiobus;
350 u8 fc_request_control;
353 struct statstage stats;
356 /* use ethtool to change the level for any given device */
357 static int msg_level = -1;
358 module_param(msg_level, int, 0);
359 MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read one 32-bit device register over a vendor control-IN transfer.
 * The 4-byte buffer is kmalloc'd rather than stack-allocated because
 * usb_control_msg() requires a DMA-capable buffer.
 * Returns the (non-negative) usb_control_msg result on success, negative
 * errno on failure. NOTE(review): lines are missing from this excerpt
 * (declarations, success path, kfree/return); verify against full source.
 */
361 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
363 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
369 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
370 USB_VENDOR_REQUEST_READ_REGISTER,
371 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
372 0, index, buf, 4, USB_CTRL_GET_TIMEOUT)
373 if (likely(ret >= 0)) {
377 netdev_warn(dev->net,
378 "Failed to read register index 0x%08x. ret = %d",
/* Write one 32-bit device register over a vendor control-OUT transfer.
 * Mirror of lan78xx_read_reg(): heap buffer for DMA safety, warning logged
 * on failure. NOTE(review): excerpt is missing the cpu_to_le32 staging of
 * `data` into *buf and the kfree/return tail; verify against full source.
 */
387 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
389 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
398 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
399 USB_VENDOR_REQUEST_WRITE_REGISTER,
400 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
401 0, index, buf, 4, USB_CTRL_SET_TIMEOUT)
402 if (unlikely(ret < 0)) {
403 netdev_warn(dev->net,
404 "Failed to write register index 0x%08x. ret = %d",
/* Fetch the whole hardware statistics block (struct lan78xx_statstage) via
 * the GET_STATS vendor request into a temporary heap buffer, then convert
 * each counter from little-endian in place.
 * NOTE(review): this call passes USB_CTRL_SET_TIMEOUT on an IN transfer —
 * looks like a copy/paste oddity carried from upstream; harmless but worth
 * confirming. Excerpt is also missing the copy into *data and cleanup.
 */
413 static int lan78xx_read_stats(struct lan78xx_net *dev,
414 struct lan78xx_statstage *data)
418 struct lan78xx_statstage *stats;
422 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
426 ret = usb_control_msg(dev->udev,
427 usb_rcvctrlpipe(dev->udev, 0),
428 USB_VENDOR_REQUEST_GET_STATS,
429 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
434 USB_CTRL_SET_TIMEOUT);
435 if (likely(ret >= 0)) {
/* Walk the struct as a flat u32 array and byte-swap each counter. */
438 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
439 le32_to_cpus(&src[i]);
443 netdev_warn(dev->net,
444 "Failed to read stat ret = %d", ret);
/* If the freshly-read hardware counter is smaller than the last snapshot,
 * the 32-bit counter wrapped: bump the per-field rollover count.
 */
452 #define check_counter_rollover(struct1, dev_stats, member) { \
453 if (struct1->member < dev_stats.saved.member) \
454 dev_stats.rollover_count.member++; \
/* Detect 32-bit wraparound for every hardware counter, then save `stats`
 * as the new baseline snapshot. Caller supplies a just-read statstage.
 */
457 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
458 struct lan78xx_statstage *stats)
460 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
461 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
462 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
463 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
464 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
465 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
466 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
467 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
468 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
469 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
470 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
471 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
472 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
473 check_counter_rollover(stats, dev->stats, rx_pause_frames);
474 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
475 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
476 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
477 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
478 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
479 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
480 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
481 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
482 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
483 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
484 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
485 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
486 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
487 check_counter_rollover(stats, dev->stats, tx_single_collisions);
488 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
489 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
490 check_counter_rollover(stats, dev->stats, tx_late_collisions);
491 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
492 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
493 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
494 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
495 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
496 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
497 check_counter_rollover(stats, dev->stats, tx_pause_frames);
498 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
499 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
500 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
501 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
502 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
503 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
504 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
505 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
506 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
/* Remember this read as the comparison baseline for the next pass. */
508 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh the accumulated 64-bit statistics: read the raw counters, record
 * any 32-bit rollovers, then recompute each 64-bit total as
 * raw + rollover_count * (rollover_max + 1), treating the three structs as
 * parallel flat arrays. Skips silently if the device can't be resumed.
 */
511 static void lan78xx_update_stats(struct lan78xx_net *dev)
513 u32 *p, *count, *max;
516 struct lan78xx_statstage lan78xx_stats;
518 if (usb_autopm_get_interface(dev->intf) < 0)
521 p = (u32 *)&lan78xx_stats;
522 count = (u32 *)&dev->stats.rollover_count;
523 max = (u32 *)&dev->stats.rollover_max;
524 data = (u64 *)&dev->stats.curr_stat;
526 mutex_lock(&dev->stats.access_lock);
528 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
529 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
/* Widen every counter into its running 64-bit total. */
531 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
532 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
534 mutex_unlock(&dev->stats.access_lock);
536 usb_autopm_put_interface(dev->intf);
539 /* Loop until the read is completed with timeout called with phy_mutex held */
540 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
542 unsigned long start_time = jiffies;
/* Poll MII_ACC until the BUSY bit clears or ~1s (HZ jiffies) elapses. */
547 ret = lan78xx_read_reg(dev, MII_ACC, &val);
548 if (unlikely(ret < 0))
551 if (!(val & MII_ACC_MII_BUSY_))
553 } while (!time_after(jiffies, start_time + HZ));
/* Compose an MII_ACC command word: PHY address + register index + direction
 * (read when `read` is true, else write) + BUSY to kick off the transaction.
 */
558 static inline u32 mii_access(int id, int index, int read)
562 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
563 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
565 ret |= MII_ACC_MII_READ_;
567 ret |= MII_ACC_MII_WRITE_;
568 ret |= MII_ACC_MII_BUSY_;
/* Wait (up to ~1s) for a previously-issued EEPROM command to finish: done
 * when BUSY clears or the controller reports TIMEOUT. Warns and fails if
 * either bit is still set when we give up.
 */
573 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
575 unsigned long start_time = jiffies;
580 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
581 if (unlikely(ret < 0))
584 if (!(val & E2P_CMD_EPC_BUSY_) ||
585 (val & E2P_CMD_EPC_TIMEOUT_))
587 usleep_range(40, 100);
588 } while (!time_after(jiffies, start_time + HZ));
590 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
591 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Ensure the EEPROM controller is idle before we issue a new command;
 * same ~1s polling pattern but without honoring the TIMEOUT flag.
 */
598 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
600 unsigned long start_time = jiffies;
605 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
606 if (unlikely(ret < 0))
609 if (!(val & E2P_CMD_EPC_BUSY_))
612 usleep_range(40, 100);
613 } while (!time_after(jiffies, start_time + HZ));
615 netdev_warn(dev->net, "EEPROM is busy");
/* Read `length` bytes from the external EEPROM one byte at a time, issuing
 * an E2P READ command per byte and polling for completion. On LAN7800 the
 * EEPROM pins are muxed with the LEDs, so LED0/LED1 are disabled first and
 * HW_CFG restored at the end (the `saved` copy is taken on a line dropped
 * from this excerpt — verify).
 */
619 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
620 u32 length, u8 *data)
627 /* depends on chip, some EEPROM pins are muxed with LED function.
628 * disable & restore LED function to access EEPROM.
630 ret = lan78xx_read_reg(dev, HW_CFG, &val);
632 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
633 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
634 ret = lan78xx_write_reg(dev, HW_CFG, val);
637 retval = lan78xx_eeprom_confirm_not_busy(dev);
641 for (i = 0; i < length; i++) {
642 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
643 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
644 ret = lan78xx_write_reg(dev, E2P_CMD, val);
645 if (unlikely(ret < 0)) {
650 retval = lan78xx_wait_eeprom(dev);
654 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
655 if (unlikely(ret < 0)) {
/* Only the low byte of E2P_DATA carries the EEPROM byte. */
660 data[i] = val & 0xFF;
/* Restore the original LED configuration on LAN7800. */
666 if (dev->chipid == ID_REV_CHIP_ID_7800_)
667 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Wrapper: only reads EEPROM contents if byte 0 carries the programmed
 * signature (EEPROM_INDICATOR); otherwise the device has no valid image.
 */
672 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
673 u32 length, u8 *data)
678 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
679 if ((ret == 0) && (sig == EEPROM_INDICATOR))
680 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write `length` bytes to the external EEPROM: enable write/erase (EWEN),
 * then per byte load E2P_DATA and issue a WRITE command, polling after each
 * step. Same LAN7800 LED-mux save/restore dance as the raw read path.
 * NOTE(review): several lines (saved-HW_CFG capture, data staging, offset
 * increment, error exits) are missing from this excerpt.
 */
687 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
688 u32 length, u8 *data)
695 /* depends on chip, some EEPROM pins are muxed with LED function.
696 * disable & restore LED function to access EEPROM.
698 ret = lan78xx_read_reg(dev, HW_CFG, &val);
700 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
701 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
702 ret = lan78xx_write_reg(dev, HW_CFG, val);
705 retval = lan78xx_eeprom_confirm_not_busy(dev);
709 /* Issue write/erase enable command */
710 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
711 ret = lan78xx_write_reg(dev, E2P_CMD, val);
712 if (unlikely(ret < 0)) {
717 retval = lan78xx_wait_eeprom(dev);
721 for (i = 0; i < length; i++) {
722 /* Fill data register */
724 ret = lan78xx_write_reg(dev, E2P_DATA, val);
730 /* Send "write" command */
731 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
732 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
733 ret = lan78xx_write_reg(dev, E2P_CMD, val);
739 retval = lan78xx_wait_eeprom(dev);
/* Restore the original LED configuration on LAN7800. */
748 if (dev->chipid == ID_REV_CHIP_ID_7800_)
749 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read `length` bytes from on-chip OTP memory. Powers the OTP block up if
 * it is in power-down (PWRDN_N set), then per byte programs the 16-bit
 * address across OTP_ADDR1/OTP_ADDR2, triggers a READ via OTP_CMD_GO, and
 * polls OTP_STATUS until not busy (~1s timeout per step).
 */
754 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
755 u32 length, u8 *data)
760 unsigned long timeout;
762 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
764 if (buf & OTP_PWR_DN_PWRDN_N_) {
765 /* clear it and wait to be cleared */
766 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
768 timeout = jiffies + HZ;
771 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
772 if (time_after(jiffies, timeout)) {
773 netdev_warn(dev->net,
774 "timeout on OTP_PWR_DN");
777 } while (buf & OTP_PWR_DN_PWRDN_N_);
780 for (i = 0; i < length; i++) {
/* Split the byte address: high bits into ADDR1, low bits into ADDR2. */
781 ret = lan78xx_write_reg(dev, OTP_ADDR1,
782 ((offset + i) >> 8) & OTP_ADDR1_15_11);
783 ret = lan78xx_write_reg(dev, OTP_ADDR2,
784 ((offset + i) & OTP_ADDR2_10_3));
786 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
787 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
789 timeout = jiffies + HZ;
792 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
793 if (time_after(jiffies, timeout)) {
794 netdev_warn(dev->net,
795 "timeout on OTP_STATUS");
798 } while (buf & OTP_STATUS_BUSY_);
800 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
802 data[i] = (u8)(buf & 0xFF);
/* Program `length` bytes into on-chip OTP memory. Same power-up sequence as
 * the raw read, then selects BYTE program mode and, per byte, writes the
 * address, loads OTP_PRGM_DATA, and triggers a program-and-verify cycle,
 * polling OTP_STATUS after each. OTP is one-time programmable — bits can
 * only be burned once.
 */
808 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
809 u32 length, u8 *data)
814 unsigned long timeout;
816 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
818 if (buf & OTP_PWR_DN_PWRDN_N_) {
819 /* clear it and wait to be cleared */
820 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
822 timeout = jiffies + HZ;
825 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
826 if (time_after(jiffies, timeout)) {
827 netdev_warn(dev->net,
828 "timeout on OTP_PWR_DN completion");
831 } while (buf & OTP_PWR_DN_PWRDN_N_);
834 /* set to BYTE program mode */
835 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
837 for (i = 0; i < length; i++) {
838 ret = lan78xx_write_reg(dev, OTP_ADDR1,
839 ((offset + i) >> 8) & OTP_ADDR1_15_11);
840 ret = lan78xx_write_reg(dev, OTP_ADDR2,
841 ((offset + i) & OTP_ADDR2_10_3));
842 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
843 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
844 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
846 timeout = jiffies + HZ;
849 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
850 if (time_after(jiffies, timeout)) {
851 netdev_warn(dev->net,
852 "Timeout on OTP_STATUS completion");
855 } while (buf & OTP_STATUS_BUSY_);
/* Signature-checked OTP read: byte 0 must be OTP_INDICATOR_1 or _2 for a
 * valid image. NOTE(review): the branch for OTP_INDICATOR_2 is truncated in
 * this excerpt — upstream adjusts the read offset for the second image;
 * verify before relying on this path.
 */
861 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
862 u32 length, u8 *data)
867 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
870 if (sig == OTP_INDICATOR_2)
872 else if (sig != OTP_INDICATOR_1)
875 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL until the dataport ready bit is set; up to 100 tries with
 * 40-100us sleeps, warning on timeout. Note the loop exits when DPRDY_ is
 * SET (ready), unlike the busy-bit loops above.
 */
881 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
885 for (i = 0; i < 100; i++) {
888 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
889 if (unlikely(ret < 0))
892 if (dp_sel & DP_SEL_DPRDY_)
895 usleep_range(40, 100);
898 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write a buffer of u32 words into one of the device's internal RAMs
 * (selected by `ram_select` in DP_SEL) through the indirect dataport:
 * per word, set DP_ADDR/DP_DATA, issue DP_CMD_WRITE_, and wait ready.
 * Serialized by pdata->dataport_mutex; takes a runtime-PM reference for
 * the duration.
 */
903 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
904 u32 addr, u32 length, u32 *buf)
906 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
910 if (usb_autopm_get_interface(dev->intf) < 0)
913 mutex_lock(&pdata->dataport_mutex);
915 ret = lan78xx_dataport_wait_not_busy(dev);
/* Select the target RAM bank, preserving the other DP_SEL bits. */
919 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
921 dp_sel &= ~DP_SEL_RSEL_MASK_;
922 dp_sel |= ram_select;
923 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
925 for (i = 0; i < length; i++) {
926 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
928 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
930 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
932 ret = lan78xx_dataport_wait_not_busy(dev);
938 mutex_unlock(&pdata->dataport_mutex);
939 usb_autopm_put_interface(dev->intf);
/* Stage a MAC address into the shadow perfect-filter table at `index`
 * (index 0 is reserved for the device's own address, hence index > 0).
 * Bytes 0-3 pack big-endian into the LO word; bytes 4-5 plus the VALID and
 * TYPE_DST flags form the HI word. Hardware registers are written later by
 * the deferred multicast work.
 */
944 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
945 int index, u8 addr[ETH_ALEN])
949 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
951 temp = addr[2] | (temp << 8);
952 temp = addr[1] | (temp << 8);
953 temp = addr[0] | (temp << 8);
954 pdata->pfilter_table[index][1] = temp;
956 temp = addr[4] | (temp << 8);
957 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
958 pdata->pfilter_table[index][0] = temp;
962 /* returns hash bit number for given MAC address */
963 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
/* Top 9 bits of the Ethernet CRC index the 512-bit multicast hash table. */
965 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler (pdata->set_multicast): flush the shadow filter state built
 * by lan78xx_set_multicast() to hardware — hash table via the dataport,
 * perfect filters via the MAF registers (HI cleared first so a half-updated
 * entry is never marked valid), then the RFE control register.
 */
968 static void lan78xx_deferred_multicast_write(struct work_struct *param)
970 struct lan78xx_priv *pdata =
971 container_of(param, struct lan78xx_priv, set_multicast);
972 struct lan78xx_net *dev = pdata->dev;
976 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
979 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
980 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
982 for (i = 1; i < NUM_OF_MAF; i++) {
/* Invalidate the slot before rewriting LO then HI. */
983 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
984 ret = lan78xx_write_reg(dev, MAF_LO(i),
985 pdata->pfilter_table[i][1]);
986 ret = lan78xx_write_reg(dev, MAF_HI(i),
987 pdata->pfilter_table[i][0]);
990 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode hook. Rebuilds the shadow RX-filter state under the
 * rfe_ctl spinlock (this runs in atomic context) according to promisc /
 * allmulti flags and the multicast list, then defers the actual register
 * writes — which need a sleepable context for USB I/O — to a workqueue.
 */
993 static void lan78xx_set_multicast(struct net_device *netdev)
995 struct lan78xx_net *dev = netdev_priv(netdev);
996 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1000 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
/* Start from a clean slate: clear filter-mode bits and shadow tables. */
1002 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1003 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1005 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1006 pdata->mchash_table[i] = 0;
1007 /* pfilter_table[0] has own HW address */
1008 for (i = 1; i < NUM_OF_MAF; i++) {
1009 pdata->pfilter_table[i][0] =
1010 pdata->pfilter_table[i][1] = 0;
/* Broadcast is always accepted. */
1013 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1015 if (dev->net->flags & IFF_PROMISC) {
1016 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1017 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1019 if (dev->net->flags & IFF_ALLMULTI) {
1020 netif_dbg(dev, drv, dev->net,
1021 "receive all multicast enabled");
1022 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1026 if (netdev_mc_count(dev->net)) {
1027 struct netdev_hw_addr *ha;
1030 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1032 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1035 netdev_for_each_mc_addr(ha, netdev) {
1036 /* set first 32 into Perfect Filter */
1038 lan78xx_set_addr_filter(pdata, i, ha->addr);
/* Overflow addresses fall back to the 512-bit hash filter. */
1040 u32 bitnum = lan78xx_hash(ha->addr);
1042 pdata->mchash_table[bitnum / 32] |=
1043 (1 << (bitnum % 32));
1044 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1050 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1052 /* defer register writes to a sleepable context */
1053 schedule_work(&pdata->set_multicast);
/* Program MAC flow control after a link change. Resolves the pause
 * capabilities either from autonegotiation (lcladv/rmtadv) or from the
 * user-forced fc_request_control, writes the FCT_FLOW thresholds (which,
 * per the comment below, must precede the enable) and then the FLOW enable
 * register. NOTE(review): the speed-dependent fct_flow threshold values for
 * SUPER/HIGH speed are on lines missing from this excerpt.
 */
1056 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1057 u16 lcladv, u16 rmtadv)
1059 u32 flow = 0, fct_flow = 0;
1063 if (dev->fc_autoneg)
1064 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1066 cap = dev->fc_request_control;
/* TX pause enable carries the pause-time field in the low 16 bits. */
1068 if (cap & FLOW_CTRL_TX)
1069 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1071 if (cap & FLOW_CTRL_RX)
1072 flow |= FLOW_CR_RX_FCEN_;
1074 if (dev->udev->speed == USB_SPEED_SUPER)
1076 else if (dev->udev->speed == USB_SPEED_HIGH)
1079 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1080 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1081 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1083 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1085 /* threshold value should be set before enabling flow */
1086 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link-state transition (run from the deferred kevent worker
 * on EVENT_LINK_RESET). Clears PHY and controller interrupt status, then:
 * on link down — updates MAC_CR, signals the phylib MAC interrupt with
 * link=0 and stops the stats timer; on link up — reads the negotiated
 * speed/duplex, tunes USB U1/U2 link power management for SuperSpeed
 * (U2 disabled at gigabit), reprograms flow control from the advertisement
 * registers, signals link=1, restarts stats monitoring, and kicks the bh
 * tasklet. NOTE(review): error-return lines and some MAC_CR bit updates
 * are missing from this excerpt.
 */
1091 static int lan78xx_link_reset(struct lan78xx_net *dev)
1093 struct phy_device *phydev = dev->net->phydev;
1094 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1095 int ladv, radv, ret;
1098 /* clear PHY interrupt status */
1099 ret = phy_read(phydev, LAN88XX_INT_STS);
1100 if (unlikely(ret < 0))
1103 /* clear LAN78xx interrupt status */
1104 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_)
1105 if (unlikely(ret < 0))
1108 phy_read_status(phydev);
1110 if (!phydev->link && dev->link_on) {
1111 dev->link_on = false;
1114 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1115 if (unlikely(ret < 0))
1118 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1119 if (unlikely(ret < 0))
/* Tell phylib the MAC saw link-down. */
1122 phy_mac_interrupt(phydev, 0);
1124 del_timer(&dev->stat_monitor);
1125 } else if (phydev->link && !dev->link_on) {
1126 dev->link_on = true;
1128 phy_ethtool_gset(phydev, &ecmd);
1130 ret = phy_read(phydev, LAN88XX_INT_STS);
1132 if (dev->udev->speed == USB_SPEED_SUPER) {
1133 if (ethtool_cmd_speed(&ecmd) == 1000) {
/* At gigabit: disable U2, keep U1 (U2 exit latency hurts throughput). */
1135 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1136 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1137 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1139 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1140 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1141 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1143 /* enable U1 & U2 */
1144 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1145 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1146 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1147 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1151 ladv = phy_read(phydev, MII_ADVERTISE);
1155 radv = phy_read(phydev, MII_LPA);
1159 netif_dbg(dev, link, dev->net,
1160 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1161 ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
1163 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
1164 phy_mac_interrupt(phydev, 1);
1166 if (!timer_pending(&dev->stat_monitor)) {
1168 mod_timer(&dev->stat_monitor,
1169 jiffies + STAT_UPDATE_TIMER);
1172 tasklet_schedule(&dev->bh);
1178 /* some work can't be done in tasklets, so we use keventd
1180 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1181 * but tasklet_schedule() doesn't. hope the failure is rare.
/* Record `work` (an EVENT_* bit) in dev->flags and wake the kevent worker.
 * The bit survives even if the work item was already queued, so the event
 * is not actually lost despite the error message.
 */
1183 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1185 set_bit(work, &dev->flags);
1186 if (!schedule_delayed_work(&dev->wq, 0))
1187 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint completion: parse the 4-byte little-endian status
 * word; a PHY interrupt defers a link reset to process context, anything
 * else is logged as unexpected.
 */
1190 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1194 if (urb->actual_length != 4) {
1195 netdev_warn(dev->net,
1196 "unexpected urb length %d", urb->actual_length);
1200 memcpy(&intdata, urb->transfer_buffer, 4);
1201 le32_to_cpus(&intdata);
1203 if (intdata & INT_ENP_PHY_INT) {
1204 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1205 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1207 netdev_warn(dev->net,
1208 "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool get_eeprom_len: fixed maximum EEPROM size. */
1211 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1213 return MAX_EEPROM_SIZE;
/* ethtool get_eeprom: raw read at the requested offset/length; magic tells
 * userspace tools which device family wrote the image.
 */
1216 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1217 struct ethtool_eeprom *ee, u8 *data)
1219 struct lan78xx_net *dev = netdev_priv(netdev);
1221 ee->magic = LAN78XX_EEPROM_MAGIC;
1223 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
/* ethtool set_eeprom: whole-image writes only — offset must be 0, magic
 * must match, and byte 0 must carry the correct indicator so a corrupt
 * image can't be flashed; EEPROM and OTP magics dispatch to the matching
 * raw-write path. (The length checks sit on lines missing from this
 * excerpt.)
 */
1226 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1227 struct ethtool_eeprom *ee, u8 *data)
1229 struct lan78xx_net *dev = netdev_priv(netdev);
1231 /* Allow entire eeprom update only */
1232 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1233 (ee->offset == 0) &&
1235 (data[0] == EEPROM_INDICATOR))
1236 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1237 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1238 (ee->offset == 0) &&
1240 (data[0] == OTP_INDICATOR_1))
1241 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
/* ethtool get_strings: export the statistics name table. */
1246 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1249 if (stringset == ETH_SS_STATS)
1250 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
/* ethtool get_sset_count: number of exported statistics. */
1253 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1255 if (sset == ETH_SS_STATS)
1256 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool get_ethtool_stats: refresh the accumulated 64-bit counters and
 * copy them out under the stats lock. Relies on curr_stat's field order
 * matching lan78xx_gstrings.
 */
1261 static void lan78xx_get_stats(struct net_device *netdev,
1262 struct ethtool_stats *stats, u64 *data)
1264 struct lan78xx_net *dev = netdev_priv(netdev);
1266 lan78xx_update_stats(dev);
1268 mutex_lock(&dev->stats.access_lock);
1269 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1270 mutex_unlock(&dev->stats.access_lock);
/* ethtool get_wol: report wake-on-LAN capability/settings. Supported modes
 * are advertised only if USB remote wakeup is enabled in USB_CFG0; the
 * currently selected modes come from the driver-cached pdata->wol.
 */
1273 static void lan78xx_get_wol(struct net_device *netdev,
1274 struct ethtool_wolinfo *wol)
1276 struct lan78xx_net *dev = netdev_priv(netdev);
1279 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1281 if (usb_autopm_get_interface(dev->intf) < 0)
1284 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1285 if (unlikely(ret < 0)) {
1289 if (buf & USB_CFG_RMT_WKP_) {
1290 wol->supported = WAKE_ALL;
1291 wol->wolopts = pdata->wol;
1298 usb_autopm_put_interface(dev->intf);
/* ethtool set_wol: reject unsupported mode bits, cache the selection,
 * arm/disarm USB wakeup on the device, and forward to the PHY so it can
 * configure its own wake events.
 */
1301 static int lan78xx_set_wol(struct net_device *netdev,
1302 struct ethtool_wolinfo *wol)
1304 struct lan78xx_net *dev = netdev_priv(netdev);
1305 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1308 ret = usb_autopm_get_interface(dev->intf);
1312 if (wol->wolopts & ~WAKE_ALL)
1315 pdata->wol = wol->wolopts;
1317 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1319 phy_ethtool_set_wol(netdev->phydev, wol);
1321 usb_autopm_put_interface(dev->intf);
/* ethtool get_eee: combine PHY-level EEE state with the MAC's MAC_CR EEE
 * enable bit. eee_active is derived as "both sides advertise EEE"; the
 * TX LPI request delay register doubles as the reported tx_lpi_timer
 * (same microsecond unit).
 */
1326 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1328 struct lan78xx_net *dev = netdev_priv(net);
1329 struct phy_device *phydev = net->phydev;
1333 ret = usb_autopm_get_interface(dev->intf);
1337 ret = phy_ethtool_get_eee(phydev, edata);
1341 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1342 if (buf & MAC_CR_EEE_EN_) {
1343 edata->eee_enabled = true;
1344 edata->eee_active = !!(edata->advertised &
1345 edata->lp_advertised);
1346 edata->tx_lpi_enabled = true;
1347 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1348 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1349 edata->tx_lpi_timer = buf;
1351 edata->eee_enabled = false;
1352 edata->eee_active = false;
1353 edata->tx_lpi_enabled = false;
1354 edata->tx_lpi_timer = 0;
1359 usb_autopm_put_interface(dev->intf);
/* ethtool set_eee: enable path sets MAC_CR EEE, pushes the advertisement
 * to the PHY and programs the LPI request delay; disable path just clears
 * the MAC enable bit.
 */
1364 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1366 struct lan78xx_net *dev = netdev_priv(net);
1370 ret = usb_autopm_get_interface(dev->intf);
1374 if (edata->eee_enabled) {
1375 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1376 buf |= MAC_CR_EEE_EN_;
1377 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1379 phy_ethtool_set_eee(net->phydev, edata);
1381 buf = (u32)edata->tx_lpi_timer;
1382 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1384 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1385 buf &= ~MAC_CR_EEE_EN_;
1386 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1389 usb_autopm_put_interface(dev->intf);
1394 static u32 lan78xx_get_link(struct net_device *net)
1396 phy_read_status(net->phydev);
1398 return net->phydev->link;
/* ethtool .nway_reset: restart autonegotiation via phylib. */
1401 static int lan78xx_nway_reset(struct net_device *net)
1403 return phy_start_aneg(net->phydev);
1406 static void lan78xx_get_drvinfo(struct net_device *net,
1407 struct ethtool_drvinfo *info)
1409 struct lan78xx_net *dev = netdev_priv(net);
1411 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1412 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1413 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1416 static u32 lan78xx_get_msglevel(struct net_device *net)
1418 struct lan78xx_net *dev = netdev_priv(net);
1420 return dev->msg_enable;
1423 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1425 struct lan78xx_net *dev = netdev_priv(net);
1427 dev->msg_enable = level;
/* Read the LAN88xx PHY MDI/MDI-X mode: switch to extended page 1, read
 * EXT_MODE_CTRL, then restore page 0.  The return of the raw register
 * value is on a line elided from this listing.
 */
1430 static int lan78xx_get_mdix_status(struct net_device *net)
1432 struct phy_device *phydev = net->phydev;
1435 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1436 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1437 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1442 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1444 struct lan78xx_net *dev = netdev_priv(net);
1445 struct phy_device *phydev = net->phydev;
1448 if (mdix_ctrl == ETH_TP_MDI) {
1449 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1450 LAN88XX_EXT_PAGE_SPACE_1);
1451 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1452 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1453 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1454 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1455 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1456 LAN88XX_EXT_PAGE_SPACE_0);
1457 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1458 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1459 LAN88XX_EXT_PAGE_SPACE_1);
1460 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1461 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1462 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1463 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1464 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1465 LAN88XX_EXT_PAGE_SPACE_0);
1466 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1467 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1468 LAN88XX_EXT_PAGE_SPACE_1);
1469 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1470 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1471 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1472 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1473 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1474 LAN88XX_EXT_PAGE_SPACE_0);
1476 dev->mdix_ctrl = mdix_ctrl;
/* ethtool .get_settings: delegate speed/duplex/aneg to phylib, then fill
 * in the MDI-X fields from the PHY's extended mode-control register.
 */
1479 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1481 struct lan78xx_net *dev = netdev_priv(net);
1482 struct phy_device *phydev = net->phydev;
1486 ret = usb_autopm_get_interface(dev->intf);
1490 ret = phy_ethtool_gset(phydev, cmd);
1492 buf = lan78xx_get_mdix_status(net);
/* isolate the MDI-X mode bits and translate to ethtool constants */
1494 buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1495 if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1496 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1497 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1498 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1499 cmd->eth_tp_mdix = ETH_TP_MDI;
1500 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1501 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1502 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1503 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1506 usb_autopm_put_interface(dev->intf);
/* ethtool .set_settings: apply MDI-X control if it changed, then hand
 * speed/duplex/aneg to phylib.  For forced (non-aneg) modes the link is
 * bounced via BMCR loopback so the partner renegotiates.
 */
1511 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1513 struct lan78xx_net *dev = netdev_priv(net);
1514 struct phy_device *phydev = net->phydev;
1518 ret = usb_autopm_get_interface(dev->intf);
1522 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1523 lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
1526 /* change speed & duplex */
1527 ret = phy_ethtool_sset(phydev, cmd);
1529 if (!cmd->autoneg) {
1530 /* force link down */
1531 temp = phy_read(phydev, MII_BMCR);
/* briefly enter loopback to drop the link, then restore BMCR
 * (a delay between the two writes is elided from this listing)
 */
1532 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1534 phy_write(phydev, MII_BMCR, temp);
1537 usb_autopm_put_interface(dev->intf);
/* ethtool .get_pauseparam: report the flow-control configuration cached
 * in dev->fc_autoneg / dev->fc_request_control.
 */
1542 static void lan78xx_get_pause(struct net_device *net,
1543 struct ethtool_pauseparam *pause)
1545 struct lan78xx_net *dev = netdev_priv(net);
1546 struct phy_device *phydev = net->phydev;
1547 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1549 phy_ethtool_gset(phydev, &ecmd);
1551 pause->autoneg = dev->fc_autoneg;
1553 if (dev->fc_request_control & FLOW_CTRL_TX)
1554 pause->tx_pause = 1;
1556 if (dev->fc_request_control & FLOW_CTRL_RX)
1557 pause->rx_pause = 1;
/* ethtool .set_pauseparam: record requested RX/TX flow control and, when
 * autoneg is used, fold the pause bits into the PHY advertisement.
 * Pause autoneg is rejected when link autoneg itself is off (visible
 * guard at 1570; its error path is elided from this listing).
 */
1560 static int lan78xx_set_pause(struct net_device *net,
1561 struct ethtool_pauseparam *pause)
1563 struct lan78xx_net *dev = netdev_priv(net);
1564 struct phy_device *phydev = net->phydev;
1565 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1568 phy_ethtool_gset(phydev, &ecmd);
1570 if (pause->autoneg && !ecmd.autoneg) {
1575 dev->fc_request_control = 0;
1576 if (pause->rx_pause)
1577 dev->fc_request_control |= FLOW_CTRL_RX;
1579 if (pause->tx_pause)
1580 dev->fc_request_control |= FLOW_CTRL_TX;
/* translate FLOW_CTRL_* into MII advertisement bits and re-advertise */
1585 ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1586 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1587 ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1588 phy_ethtool_sset(phydev, &ecmd);
1591 dev->fc_autoneg = pause->autoneg;
/* ethtool operations table for the lan78xx netdev. */
1598 static const struct ethtool_ops lan78xx_ethtool_ops = {
1599 .get_link = lan78xx_get_link,
1600 .nway_reset = lan78xx_nway_reset,
1601 .get_drvinfo = lan78xx_get_drvinfo,
1602 .get_msglevel = lan78xx_get_msglevel,
1603 .set_msglevel = lan78xx_set_msglevel,
1604 .get_settings = lan78xx_get_settings,
1605 .set_settings = lan78xx_set_settings,
1606 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1607 .get_eeprom = lan78xx_ethtool_get_eeprom,
1608 .set_eeprom = lan78xx_ethtool_set_eeprom,
1609 .get_ethtool_stats = lan78xx_get_stats,
1610 .get_sset_count = lan78xx_get_sset_count,
1611 .get_strings = lan78xx_get_strings,
1612 .get_wol = lan78xx_get_wol,
1613 .set_wol = lan78xx_set_wol,
1614 .get_eee = lan78xx_get_eee,
1615 .set_eee = lan78xx_set_eee,
1616 .get_pauseparam = lan78xx_get_pause,
1617 .set_pauseparam = lan78xx_set_pause,
/* ndo_do_ioctl: forward MII ioctls to the PHY; the error return for a
 * non-running interface is on a line elided from this listing.
 */
1620 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1622 if (!netif_running(netdev))
1625 return phy_mii_ioctl(netdev->phydev, rq, cmd);
/* Establish the MAC address at init time.  Priority order: current
 * RX_ADDRL/H registers -> platform/Device Tree -> EEPROM/OTP -> random.
 * The chosen address is written back to the RX address registers, to
 * perfect-filter slot 0 (MAF), and into net->dev_addr.
 */
1628 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1630 u32 addr_lo, addr_hi;
1634 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1635 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* unpack the two little-endian registers into a 6-byte address */
1637 addr[0] = addr_lo & 0xFF;
1638 addr[1] = (addr_lo >> 8) & 0xFF;
1639 addr[2] = (addr_lo >> 16) & 0xFF;
1640 addr[3] = (addr_lo >> 24) & 0xFF;
1641 addr[4] = addr_hi & 0xFF;
1642 addr[5] = (addr_hi >> 8) & 0xFF;
1644 if (!is_valid_ether_addr(addr)) {
1645 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1646 /* valid address present in Device Tree */
1647 netif_dbg(dev, ifup, dev->net,
1648 "MAC address read from Device Tree");
1649 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1650 ETH_ALEN, addr) == 0) ||
1651 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1652 ETH_ALEN, addr) == 0)) &&
1653 is_valid_ether_addr(addr)) {
1654 /* eeprom values are valid so use them */
1655 netif_dbg(dev, ifup, dev->net,
1656 "MAC address read from EEPROM");
1658 /* generate random MAC */
1659 random_ether_addr(addr);
1660 netif_dbg(dev, ifup, dev->net,
1661 "MAC address set to random addr");
/* repack and program the hardware receive-address registers */
1664 addr_lo = addr[0] | (addr[1] << 8) |
1665 (addr[2] << 16) | (addr[3] << 24);
1666 addr_hi = addr[4] | (addr[5] << 8);
1668 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1669 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* mirror into MAC address perfect-filter slot 0 and mark it valid */
1672 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1673 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1675 ether_addr_copy(dev->net->dev_addr, addr);
1678 /* MDIO read and write wrappers for phylib */
/* Perform one MDIO read through the MAC's MII_ACC/MII_DATA registers.
 * Serialized by dev->phy_mutex; holds a USB autopm reference while the
 * transaction is in flight.  Returns the 16-bit register value, or a
 * negative errno (error paths are elided from this listing).
 */
1679 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1681 struct lan78xx_net *dev = bus->priv;
1685 ret = usb_autopm_get_interface(dev->intf);
1689 mutex_lock(&dev->phy_mutex);
1691 /* confirm MII not busy */
1692 ret = lan78xx_phy_wait_not_busy(dev);
1696 /* set the address, index & direction (read from PHY) */
1697 addr = mii_access(phy_id, idx, MII_READ);
1698 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1700 ret = lan78xx_phy_wait_not_busy(dev);
1704 ret = lan78xx_read_reg(dev, MII_DATA, &val);
/* MDIO registers are 16 bits wide */
1706 ret = (int)(val & 0xFFFF);
1709 mutex_unlock(&dev->phy_mutex);
1710 usb_autopm_put_interface(dev->intf);
/* Perform one MDIO write: load MII_DATA first, then trigger the access
 * via MII_ACC and wait for completion.  Locking mirrors mdiobus_read.
 */
1714 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1717 struct lan78xx_net *dev = bus->priv;
1721 ret = usb_autopm_get_interface(dev->intf);
1725 mutex_lock(&dev->phy_mutex);
1727 /* confirm MII not busy */
1728 ret = lan78xx_phy_wait_not_busy(dev);
/* data must be staged before the access register kicks the cycle off */
1733 ret = lan78xx_write_reg(dev, MII_DATA, val);
1735 /* set the address, index & direction (write to PHY) */
1736 addr = mii_access(phy_id, idx, MII_WRITE);
1737 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1739 ret = lan78xx_phy_wait_not_busy(dev);
1744 mutex_unlock(&dev->phy_mutex);
1745 usb_autopm_put_interface(dev->intf);
/* Allocate and register the MDIO bus backing the internal PHY.  The bus
 * id encodes the USB bus/device numbers so it is unique per dongle.
 * On registration failure the bus is freed (1785); intermediate return
 * statements are elided from this listing.
 */
1749 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1753 dev->mdiobus = mdiobus_alloc();
1754 if (!dev->mdiobus) {
1755 netdev_err(dev->net, "can't allocate MDIO bus\n");
1759 dev->mdiobus->priv = (void *)dev;
1760 dev->mdiobus->read = lan78xx_mdiobus_read;
1761 dev->mdiobus->write = lan78xx_mdiobus_write;
1762 dev->mdiobus->name = "lan78xx-mdiobus";
1763 dev->mdiobus->parent = &dev->udev->dev;
1765 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1766 dev->udev->bus->busnum, dev->udev->devnum)
1768 switch (dev->chipid) {
1769 case ID_REV_CHIP_ID_7800_:
1770 case ID_REV_CHIP_ID_7850_:
1771 /* set to internal PHY id */
/* probe only PHY address 1 (the on-chip PHY) */
1772 dev->mdiobus->phy_mask = ~(1 << 1);
1776 ret = mdiobus_register(dev->mdiobus);
1778 netdev_err(dev->net, "can't register MDIO bus\n");
1782 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* error unwind: release the allocated bus */
1785 mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
1789 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1791 mdiobus_unregister(dev->mdiobus);
1792 mdiobus_free(dev->mdiobus);
/* phylib link-change callback.  Contains a hardware workaround: in forced
 * 100 Mb/s mode the chip can latch the wrong mode after a cable swap, so
 * the speed is dropped to 10 and re-forced to 100 with PHY interrupts
 * masked for the duration.
 */
1795 static void lan78xx_link_status_change(struct net_device *net)
1797 struct phy_device *phydev = net->phydev;
1800 /* At forced 100 F/H mode, chip may fail to set mode correctly
1801 * when cable is switched between long(~50+m) and short one.
1802 * As workaround, set to 10 before setting to 100
1803 * at forced 100 F/H mode.
1805 if (!phydev->autoneg && (phydev->speed == 100)) {
1806 /* disable phy interrupt */
1807 temp = phy_read(phydev, LAN88XX_INT_MASK);
1808 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1809 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1811 temp = phy_read(phydev, MII_BMCR);
1812 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1813 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1814 temp |= BMCR_SPEED100;
1815 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1817 /* clear pending interrupt generated while workaround */
1818 temp = phy_read(phydev, LAN88XX_INT_STS);
1820 /* enable phy interrupt back */
1821 temp = phy_read(phydev, LAN88XX_INT_MASK);
1822 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1823 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
/* Find the PHY on our MDIO bus, connect it to the netdev, and configure
 * interrupts, MDI-X, supported modes and flow-control advertisement.
 * Returns 0 on success; error returns are elided from this listing.
 */
1827 static int lan78xx_phy_init(struct lan78xx_net *dev)
1831 struct phy_device *phydev = dev->net->phydev;
1833 phydev = phy_find_first(dev->mdiobus);
1835 netdev_err(dev->net, "no PHY found\n");
1839 /* Enable PHY interrupts.
1840 * We handle our own interrupt
/* reading INT_STS clears any stale latched PHY interrupt */
1842 ret = phy_read(phydev, LAN88XX_INT_STS);
1843 ret = phy_write(phydev, LAN88XX_INT_MASK,
1844 LAN88XX_INT_MASK_MDINTPIN_EN_ |
1845 LAN88XX_INT_MASK_LINK_CHANGE_);
/* link events arrive via the USB interrupt endpoint, not a PHY IRQ line */
1847 phydev->irq = PHY_IGNORE_INTERRUPT;
1849 ret = phy_connect_direct(dev->net, phydev,
1850 lan78xx_link_status_change,
1851 PHY_INTERFACE_MODE_GMII);
1853 netdev_err(dev->net, "can't attach PHY to %s\n",
1858 /* set to AUTOMDIX */
1859 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1861 /* MAC doesn't support 1000T Half */
1862 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1864 /* support both flow controls */
1865 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1866 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1867 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1868 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1870 genphy_config_aneg(phydev);
1872 dev->fc_autoneg = phydev->autoneg;
1876 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
/* Program the MAC's maximum RX frame size.  The receiver is disabled
 * while the size field changes and re-enabled afterwards if it was on.
 */
1881 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1887 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1889 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
/* quiesce RX before touching the max-size field */
1892 buf &= ~MAC_RX_RXEN_;
1893 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1896 /* add 4 to size for FCS */
1897 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1898 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1900 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* restore RX only if it was enabled on entry */
1903 buf |= MAC_RX_RXEN_;
1904 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Unlink every URB queued on @q.  The queue lock is dropped around each
 * usb_unlink_urb() call (which may sleep/complete inline), so the walk
 * restarts from the head each iteration and entries already in
 * unlink_start state are skipped.  Returns the count of unlinked URBs
 * (accumulation/return lines are elided from this listing).
 */
1910 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1912 struct sk_buff *skb;
1913 unsigned long flags;
1916 spin_lock_irqsave(&q->lock, flags);
1917 while (!skb_queue_empty(q)) {
1918 struct skb_data *entry;
/* find the first entry not already being unlinked */
1922 skb_queue_walk(q, skb) {
1923 entry = (struct skb_data *)skb->cb;
1924 if (entry->state != unlink_start)
1929 entry->state = unlink_start;
1932 /* Get reference count of the URB to avoid it to be
1933 * freed during usb_unlink_urb, which may trigger
1934 * use-after-free problem inside usb_unlink_urb since
1935 * usb_unlink_urb is always racing with .complete
1936 * handler(include defer_bh).
/* drop the lock: usb_unlink_urb must not be called under it */
1939 spin_unlock_irqrestore(&q->lock, flags);
1940 /* during some PM-driven resume scenarios,
1941 * these (async) unlinks complete immediately
1943 ret = usb_unlink_urb(urb);
1944 if (ret != -EINPROGRESS && ret != 0)
1945 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1949 spin_lock_irqsave(&q->lock, flags);
1951 spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: validate the new MTU, reprogram the MAC's max RX frame
 * length, and grow rx_urb_size if the hard MTU was driving it.  In-flight
 * RX URBs are unlinked so they get resubmitted at the new size.
 */
1955 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1957 struct lan78xx_net *dev = netdev_priv(netdev);
1958 int ll_mtu = new_mtu + netdev->hard_header_len;
1959 int old_hard_mtu = dev->hard_mtu;
1960 int old_rx_urb_size = dev->rx_urb_size;
1963 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1968 /* no second zero-length packet read wanted after mtu-sized packets */
1969 if ((ll_mtu % dev->maxpacket) == 0)
1972 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1974 netdev->mtu = new_mtu;
1976 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
/* rx_urb_size tracked hard_mtu, so keep them in lock-step */
1977 if (dev->rx_urb_size == old_hard_mtu) {
1978 dev->rx_urb_size = dev->hard_mtu;
1979 if (dev->rx_urb_size > old_rx_urb_size) {
1980 if (netif_running(dev->net)) {
1981 unlink_urbs(dev, &dev->rxq);
1982 tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: validate and program a new MAC address into the
 * RX address registers and perfect-filter slot 0.  Rejected while the
 * interface is running (guard at 1997; its return is elided here).
 */
1990 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1992 struct lan78xx_net *dev = netdev_priv(netdev);
1993 struct sockaddr *addr = p;
1994 u32 addr_lo, addr_hi;
1997 if (netif_running(netdev))
2000 if (!is_valid_ether_addr(addr->sa_data))
2001 return -EADDRNOTAVAIL;
2003 ether_addr_copy(netdev->dev_addr, addr->sa_data);
/* pack the 6 bytes into the two little-endian hardware registers */
2005 addr_lo = netdev->dev_addr[0] |
2006 netdev->dev_addr[1] << 8 |
2007 netdev->dev_addr[2] << 16 |
2008 netdev->dev_addr[3] << 24;
2009 addr_hi = netdev->dev_addr[4] |
2010 netdev->dev_addr[5] << 8;
2012 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2013 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2015 /* Added to support MAC address changes */
2016 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2017 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2022 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features: mirror NETIF_F_RXCSUM and VLAN-filter feature bits
 * into the cached RFE_CTL value (under rfe_ctl_lock) and write it out.
 * NOTE(review): the register write happens after the lock is dropped —
 * concurrent callers could write rfe_ctl values out of order; confirm
 * whether callers are serialized by rtnl.
 */
2023 static int lan78xx_set_features(struct net_device *netdev,
2024 netdev_features_t features)
2026 struct lan78xx_net *dev = netdev_priv(netdev);
2027 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2028 unsigned long flags;
2031 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2033 if (features & NETIF_F_RXCSUM) {
2034 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2035 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2037 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2038 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2041 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2042 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2044 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2046 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2048 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* Workqueue handler: flush the shadow VLAN filter table to the chip's
 * dataport.  Runs in process context because the dataport write sleeps.
 */
2053 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2055 struct lan78xx_priv *pdata =
2056 container_of(param, struct lan78xx_priv, set_vlan);
2057 struct lan78xx_net *dev = pdata->dev;
2059 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2060 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the bit for @vid in the shadow VLAN hash
 * table (32 bits per dword) and schedule the deferred hardware write.
 */
2063 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2064 __be16 proto, u16 vid)
2066 struct lan78xx_net *dev = netdev_priv(netdev);
2067 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2069 u16 vid_dword_index;
/* dword = vid / 32, bit = vid % 32 */
2071 vid_dword_index = (vid >> 5) & 0x7F;
2072 vid_bit_index = vid & 0x1F;
2074 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2076 /* defer register writes to a sleepable context */
2077 schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the bit for @vid in the shadow VLAN hash
 * table and schedule the deferred hardware write (mirror of add_vid).
 */
2082 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2083 __be16 proto, u16 vid)
2085 struct lan78xx_net *dev = netdev_priv(netdev);
2086 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2088 u16 vid_dword_index;
2090 vid_dword_index = (vid >> 5) & 0x7F;
2091 vid_bit_index = vid & 0x1F;
2093 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2095 /* defer register writes to a sleepable context */
2096 schedule_work(&pdata->set_vlan);
/* Initialise USB Latency Tolerance Messaging.  When LTM is enabled in
 * USB_CFG1, try to load the six LTM registers from EEPROM, then OTP
 * (a 24-byte blob flagged by temp[0] == 24); otherwise the zeroed
 * defaults in regs[] are written.  Error paths are elided here.
 */
2101 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2105 u32 regs[6] = { 0 };
2107 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2108 if (buf & USB_CFG1_LTM_ENABLE_) {
2110 /* Get values from EEPROM first */
2111 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2112 if (temp[0] == 24) {
2113 ret = lan78xx_read_raw_eeprom(dev,
2120 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2121 if (temp[0] == 24) {
2122 ret = lan78xx_read_raw_otp(dev,
/* program all six LTM registers in one go */
2132 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2133 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2134 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2135 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2136 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2137 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full chip initialisation: soft (Lite) reset, MAC address setup, USB
 * configuration, FIFO sizing, receive-filter defaults, PHY reset, and
 * finally enabling the MAC/FCT TX and RX paths.  The sequence is
 * order-sensitive hardware bring-up; statements are kept byte-identical.
 */
2140 static int lan78xx_reset(struct lan78xx_net *dev)
2142 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2145 unsigned long timeout;
/* issue a Lite reset and poll (up to 1s) for the bit to self-clear */
2147 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2148 buf |= HW_CFG_LRST_;
2149 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2151 timeout = jiffies + HZ;
2154 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2155 if (time_after(jiffies, timeout)) {
2156 netdev_warn(dev->net,
2157 "timeout on completion of LiteReset");
2160 } while (buf & HW_CFG_LRST_);
2162 lan78xx_init_mac_address(dev);
2164 /* save DEVID for later usage */
2165 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2166 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2167 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2169 /* Respond to the IN token with a NAK */
2170 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2171 buf |= USB_CFG_BIR_;
2172 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2175 lan78xx_init_ltm(dev);
/* account for the 8-byte TX command header in the hard MTU */
2177 dev->net->hard_header_len += TX_OVERHEAD;
2178 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
/* burst cap and queue lengths scale with the USB link speed */
2180 if (dev->udev->speed == USB_SPEED_SUPER) {
2181 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2182 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2185 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2186 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2187 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2188 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2189 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2191 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2192 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2197 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2198 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2200 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2202 ret = lan78xx_write_reg(dev, HW_CFG, buf)
/* enable bulk-in concatenation (multiple frames per bulk transfer) */
2204 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2205 buf |= USB_CFG_BCE_;
2206 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2208 /* set FIFO sizes */
2209 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2210 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2212 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2213 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
/* clear interrupts and disable flow control until link is known */
2215 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2216 ret = lan78xx_write_reg(dev, FLOW, 0);
2217 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2219 /* Don't need rfe_ctl_lock during initialisation */
2220 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2221 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2222 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2224 /* Enable or disable checksum offload engines */
2225 lan78xx_set_features(dev->net, dev->net->features);
2227 lan78xx_set_multicast(dev->net);
/* reset the internal PHY and poll (up to 1s) until it is ready */
2230 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2231 buf |= PMT_CTL_PHY_RST_;
2232 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2234 timeout = jiffies + HZ;
2237 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2238 if (time_after(jiffies, timeout)) {
2239 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2242 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2244 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2245 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2246 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2248 /* enable PHY interrupts */
2249 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2250 buf |= INT_ENP_PHY_INT;
2251 ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
/* bring up the TX path (MAC then FIFO controller) */
2253 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2254 buf |= MAC_TX_TXEN_;
2255 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2257 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2258 buf |= FCT_TX_CTL_EN_;
2259 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2261 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
/* bring up the RX path (MAC then FIFO controller) */
2263 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2264 buf |= MAC_RX_RXEN_;
2265 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2267 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2268 buf |= FCT_RX_CTL_EN_;
2269 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* Prime the statistics machinery: fill rollover_max with the 20-bit
 * counter ceiling (loop body elided from this listing), override the
 * counters that are actually 32 bits wide, then kick off the first
 * deferred statistics update.
 */
2274 static void lan78xx_init_stats(struct lan78xx_net *dev)
2279 /* initialize for stats update
2280 * some counters are 20bits and some are 32bits
2282 p = (u32 *)&dev->stats.rollover_max;
2283 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
/* these specific counters roll over at 32 bits, not 20 */
2286 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2287 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2288 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2289 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2290 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2291 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2292 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2293 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2294 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2295 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2297 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* ndo_open: reset the chip, bring up the PHY, submit the interrupt URB
 * used for link events, start statistics, and open the TX queue.  A link
 * check is deferred to the kevent worker.  Error unwinding between steps
 * is elided from this listing.
 */
2300 static int lan78xx_open(struct net_device *net)
2302 struct lan78xx_net *dev = netdev_priv(net);
2305 ret = usb_autopm_get_interface(dev->intf);
2309 ret = lan78xx_reset(dev);
2313 ret = lan78xx_phy_init(dev);
2317 /* for Link Check */
2318 if (dev->urb_intr) {
2319 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2321 netif_err(dev, ifup, dev->net,
2322 "intr submit %d\n", ret);
2327 lan78xx_init_stats(dev);
2329 set_bit(EVENT_DEV_OPEN, &dev->flags);
2331 netif_start_queue(net);
/* assume link down until the deferred link reset says otherwise */
2333 dev->link_on = false;
2335 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2337 usb_autopm_put_interface(dev->intf);
2343 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2345 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2346 DECLARE_WAITQUEUE(wait, current);
2349 /* ensure there are no more active urbs */
2350 add_wait_queue(&unlink_wakeup, &wait);
2351 set_current_state(TASK_UNINTERRUPTIBLE);
2352 dev->wait = &unlink_wakeup;
2353 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2355 /* maybe wait for deletions to finish. */
2356 while (!skb_queue_empty(&dev->rxq) &&
2357 !skb_queue_empty(&dev->txq) &&
2358 !skb_queue_empty(&dev->done)) {
2359 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2360 set_current_state(TASK_UNINTERRUPTIBLE);
2361 netif_dbg(dev, ifdown, dev->net,
2362 "waited for %d urb completions\n", temp);
2364 set_current_state(TASK_RUNNING);
2366 remove_wait_queue(&unlink_wakeup, &wait);
/* ndo_stop: stop the stats timer, detach the PHY, quiesce queues and
 * URBs, and neutralise deferred work before releasing the autopm ref.
 */
2369 static int lan78xx_stop(struct net_device *net)
2371 struct lan78xx_net *dev = netdev_priv(net);
2373 if (timer_pending(&dev->stat_monitor))
2374 del_timer_sync(&dev->stat_monitor);
2376 phy_stop(net->phydev);
2377 phy_disconnect(net->phydev);
2380 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2381 netif_stop_queue(net);
2383 netif_info(dev, ifdown, dev->net,
2384 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2385 net->stats.rx_packets, net->stats.tx_packets,
2386 net->stats.rx_errors, net->stats.tx_errors);
/* drain all in-flight bulk URBs, then the interrupt URB */
2388 lan78xx_terminate_urbs(dev);
2390 usb_kill_urb(dev->urb_intr);
2392 skb_queue_purge(&dev->rxq_pause);
2394 /* deferred work (task, timer, softirq) must also stop.
2395 * can't flush_scheduled_work() until we drop rtnl (later),
2396 * else workers could deadlock; so make workers a NOP.
2399 cancel_delayed_work_sync(&dev->wq);
2400 tasklet_kill(&dev->bh);
2402 usb_autopm_put_interface(dev->intf);
/* Prepend the 8-byte TX command header (TX_CMD_A/TX_CMD_B) to @skb:
 * length + FCS insertion, checksum-offload flags, LSO/MSS, and VLAN tag
 * insertion.  Frees the skb and returns NULL on failure.
 * NOTE(review): tx_cmd_b's zero-initialisation and the skb_push() calls
 * are on lines elided from this listing.
 */
2407 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2408 struct sk_buff *skb, gfp_t flags)
2410 u32 tx_cmd_a, tx_cmd_b;
/* make headroom for the command words writable */
2412 if (skb_cow_head(skb, TX_OVERHEAD)) {
2413 dev_kfree_skb_any(skb);
/* hardware expects a single contiguous buffer */
2417 if (skb_linearize(skb)) {
2418 dev_kfree_skb_any(skb);
2422 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2424 if (skb->ip_summed == CHECKSUM_PARTIAL)
2425 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2428 if (skb_is_gso(skb)) {
2429 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2431 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2433 tx_cmd_a |= TX_CMD_A_LSO_;
2436 if (skb_vlan_tag_present(skb)) {
2437 tx_cmd_a |= TX_CMD_A_IVTG_;
2438 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* command words go on the wire little-endian, B then A at the head */
2442 cpu_to_le32s(&tx_cmd_b);
2443 memcpy(skb->data, &tx_cmd_b, 4);
2446 cpu_to_le32s(&tx_cmd_a);
2447 memcpy(skb->data, &tx_cmd_a, 4);
/* Move @skb from @list to dev->done under the queue locks, tag it with
 * @state, and schedule the bottom half on the first enqueue.  Returns
 * the skb's previous state.
 */
2452 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2453 struct sk_buff_head *list, enum skb_state state)
2455 unsigned long flags;
2456 enum skb_state old_state;
2457 struct skb_data *entry = (struct skb_data *)skb->cb;
2459 spin_lock_irqsave(&list->lock, flags);
2460 old_state = entry->state;
2461 entry->state = state;
2463 __skb_unlink(skb, list);
/* hand-over-hand: swap list->lock for done.lock, irqs stay disabled */
2464 spin_unlock(&list->lock);
2465 spin_lock(&dev->done.lock);
2467 __skb_queue_tail(&dev->done, skb);
/* only the 1st entry needs to schedule; later ones find the bh pending */
2468 if (skb_queue_len(&dev->done) == 1)
2469 tasklet_schedule(&dev->bh);
2470 spin_unlock_irqrestore(&dev->done.lock, flags);
/* URB completion handler for bulk-out transfers.  Updates TX stats,
 * classifies errors (the switch cases between the visible lines are
 * elided from this listing), then defers the skb to the done queue.
 */
2475 static void tx_complete(struct urb *urb)
2477 struct sk_buff *skb = (struct sk_buff *)urb->context;
2478 struct skb_data *entry = (struct skb_data *)skb->cb;
2479 struct lan78xx_net *dev = entry->dev;
2481 if (urb->status == 0) {
2482 dev->net->stats.tx_packets += entry->num_of_packet;
2483 dev->net->stats.tx_bytes += entry->length;
2485 dev->net->stats.tx_errors++;
2487 switch (urb->status) {
/* endpoint stalled: have the kevent worker clear the halt */
2489 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2492 /* software-driven interface shutdown */
2500 netif_stop_queue(dev->net);
2503 netif_dbg(dev, tx_err, dev->net,
2504 "tx err %d\n", entry->urb->status);
/* release the autopm reference taken when the URB was submitted */
2509 usb_autopm_put_interface_async(dev->intf);
2511 defer_bh(dev, skb, &dev->txq, tx_done);
/* Append @newsk to @list and record its lifecycle state in the cb area.
 * Caller must hold the list lock (uses the unlocked __skb_queue_tail).
 */
2514 static void lan78xx_queue_skb(struct sk_buff_head *list,
2515 struct sk_buff *newsk, enum skb_state state)
2517 struct skb_data *entry = (struct skb_data *)newsk->cb;
2519 __skb_queue_tail(list, newsk);
2520 entry->state = state;
/* ndo_start_xmit: prepend the TX command header via lan78xx_tx_prep(),
 * queue the frame on txq_pend, and let the bottom half submit URBs.
 * Always returns NETDEV_TX_OK (tx_prep frees the skb on failure).
 */
2524 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2526 struct lan78xx_net *dev = netdev_priv(net);
2527 struct sk_buff *skb2 = NULL;
2530 skb_tx_timestamp(skb);
2531 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2535 skb_queue_tail(&dev->txq_pend, skb2);
2537 /* throttle TX patch at slower than SUPER SPEED USB */
2538 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2539 (skb_queue_len(&dev->txq_pend) > 10))
2540 netif_stop_queue(net);
/* else-branch: tx_prep failed and already freed the skb */
2542 netif_dbg(dev, tx_err, dev->net,
2543 "lan78xx_tx_prep return NULL\n");
2544 dev->net->stats.tx_errors++;
2545 dev->net->stats.tx_dropped++;
2548 tasklet_schedule(&dev->bh);
2550 return NETDEV_TX_OK;
/* Driver bind: allocate the per-device private data (stashed in
 * dev->data[0]), initialise its locks/workers/VLAN table, pick default
 * netdev feature flags, reset the chip and register the MDIO bus.
 */
2553 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2555 struct lan78xx_priv *pdata = NULL;
2559 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2561 pdata = (struct lan78xx_priv *)(dev->data[0]);
/* allocation-failure branch (return elided from this listing) */
2563 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2569 spin_lock_init(&pdata->rfe_ctl_lock);
2570 mutex_init(&pdata->dataport_mutex);
2572 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2574 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2575 pdata->vlan_table[i] = 0;
2577 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
/* build the default feature set from the compile-time defaults */
2579 dev->net->features = 0;
2581 if (DEFAULT_TX_CSUM_ENABLE)
2582 dev->net->features |= NETIF_F_HW_CSUM;
2584 if (DEFAULT_RX_CSUM_ENABLE)
2585 dev->net->features |= NETIF_F_RXCSUM;
2587 if (DEFAULT_TSO_CSUM_ENABLE)
2588 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2590 dev->net->hw_features = dev->net->features;
2592 /* Init all registers */
2593 ret = lan78xx_reset(dev);
2595 lan78xx_mdio_init(dev);
2597 dev->net->flags |= IFF_MULTICAST;
/* default wake mode reported/used by the WoL ethtool ops */
2599 pdata->wol = WAKE_MAGIC;
/* Driver unbind: remove the MDIO bus and free the private data
 * (the kfree/cleanup lines are elided from this listing).
 */
2604 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2606 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2608 lan78xx_remove_mdio(dev);
2611 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply the hardware-computed RX checksum to @skb, unless RX checksum
 * offload is disabled or the frame is flagged (RX_CMD_A_ICSM_) as one
 * the engine could not checksum — then fall back to CHECKSUM_NONE.
 */
2618 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2619 struct sk_buff *skb,
2620 u32 rx_cmd_a, u32 rx_cmd_b)
2622 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2623 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2624 skb->ip_summed = CHECKSUM_NONE;
/* checksum is delivered in the upper 16 bits of RX_CMD_B */
2626 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2627 skb->ip_summed = CHECKSUM_COMPLETE;
/* Hand a fully parsed RX frame to the network stack, or park it on
 * rxq_pause while reception is paused (EVENT_RX_PAUSED).
 */
2631 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2635 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2636 skb_queue_tail(&dev->rxq_pause, skb);
2640 dev->net->stats.rx_packets++;
2641 dev->net->stats.rx_bytes += skb->len;
2643 skb->protocol = eth_type_trans(skb, dev->net);
2645 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2646 skb->len + sizeof(struct ethhdr), skb->protocol);
/* scrub driver-private cb state before the stack sees the skb */
2647 memset(skb->cb, 0, sizeof(struct skb_data));
2649 if (skb_defer_rx_timestamp(skb))
2652 status = netif_rx(skb);
2653 if (status != NET_RX_SUCCESS)
2654 netif_dbg(dev, rx_err, dev->net,
2655 "netif_rx status %d\n", status);
/* Parse one bulk-in buffer that may carry several concatenated frames.
 * Each frame is preceded by RX_CMD_A/B (32-bit) and RX_CMD_C (16-bit)
 * little-endian command words, and frames are padded to 4-byte alignment.
 * Returns nonzero on success, 0 on a malformed buffer (return statements
 * are elided from this listing).
 */
2658 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2660 if (skb->len < dev->net->hard_header_len)
2663 while (skb->len > 0) {
2664 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2666 struct sk_buff *skb2;
2667 unsigned char *packet;
/* peel off the three command words ahead of the packet data */
2669 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2670 le32_to_cpus(&rx_cmd_a);
2671 skb_pull(skb, sizeof(rx_cmd_a));
2673 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2674 le32_to_cpus(&rx_cmd_b);
2675 skb_pull(skb, sizeof(rx_cmd_b));
2677 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2678 le16_to_cpus(&rx_cmd_c);
2679 skb_pull(skb, sizeof(rx_cmd_c));
2683 /* get the packet length */
2684 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2685 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2687 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2688 netif_dbg(dev, rx_err, dev->net,
2689 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2691 /* last frame in this batch */
2692 if (skb->len == size) {
2693 lan78xx_rx_csum_offload(dev, skb,
2694 rx_cmd_a, rx_cmd_b);
2696 skb_trim(skb, skb->len - 4); /* remove fcs */
2697 skb->truesize = size + sizeof(struct sk_buff);
/* more frames follow: clone and carve this one out */
2702 skb2 = skb_clone(skb, GFP_ATOMIC);
2703 if (unlikely(!skb2)) {
2704 netdev_warn(dev->net, "Error allocating skb");
2709 skb2->data = packet;
2710 skb_set_tail_pointer(skb2, size);
2712 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2714 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2715 skb2->truesize = size + sizeof(struct sk_buff);
2717 lan78xx_skb_return(dev, skb2);
2720 skb_pull(skb, size);
2722 /* padding bytes before the next frame starts */
2724 skb_pull(skb, align_count);
/* Bottom-half helper: run a completed RX buffer through lan78xx_rx().
 * A parse failure counts as an rx_error; otherwise the (now headerless)
 * skb is returned to the stack, or re-queued on dev->done for cleanup.
 * NOTE(review): branch structure is partially elided in this view.
 */
2730 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2732 if (!lan78xx_rx(dev, skb)) {
2733 dev->net->stats.rx_errors++;
2738 lan78xx_skb_return(dev, skb);
2742 netif_dbg(dev, rx_err, dev->net, "drop\n");
2743 dev->net->stats.rx_errors++;
2745 skb_queue_tail(&dev->done, skb);
/* Forward declaration: rx_submit() installs rx_complete() as the URB
 * completion callback before its definition appears below.
 */
2748 static void rx_complete(struct urb *urb);
/* Allocate an RX skb of dev->rx_urb_size and submit @urb on the bulk-IN
 * pipe.  Submission happens under the rxq lock and only while the device
 * is present, the interface is running, and neither RX-halt recovery nor
 * autosuspend is in progress.  -EPIPE defers halt clearing to the
 * kevent worker; -ENODEV detaches the netdev.  On any failure the skb is
 * freed.
 */
2750 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2752 struct sk_buff *skb;
2753 struct skb_data *entry;
2754 unsigned long lockflags;
2755 size_t size = dev->rx_urb_size;
2758 skb = netdev_alloc_skb_ip_align(dev->net, size);
/* Bookkeeping for this in-flight URB lives in skb->cb. */
2764 entry = (struct skb_data *)skb->cb;
2769 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2770 skb->data, size, rx_complete, skb);
2772 spin_lock_irqsave(&dev->rxq.lock, lockflags);
2774 if (netif_device_present(dev->net) &&
2775 netif_running(dev->net) &&
2776 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2777 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2778 ret = usb_submit_urb(urb, GFP_ATOMIC);
2781 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
/* -EPIPE: endpoint stalled; let the worker clear the halt. */
2784 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2787 netif_dbg(dev, ifdown, dev->net, "device gone\n");
2788 netif_device_detach(dev->net);
2794 netif_dbg(dev, rx_err, dev->net,
2795 "rx submit, %d\n", ret);
/* Retry from the tasklet rather than spinning here. */
2796 tasklet_schedule(&dev->bh);
2799 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2802 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2804 dev_kfree_skb_any(skb);
/* Bulk-IN URB completion callback (interrupt context).
 * Classifies urb->status: success (length-checked), stall (-EPIPE →
 * defer halt recovery), unlink/shutdown, throttlable babble/overrun,
 * and generic errors.  The skb is then handed to defer_bh() for the
 * tasklet, and the URB is resubmitted immediately when the interface is
 * still running and not being unlinked.
 * NOTE(review): case labels and break statements are elided in this
 * extraction; the visible lines are the case bodies.
 */
2810 static void rx_complete(struct urb *urb)
2812 struct sk_buff *skb = (struct sk_buff *)urb->context;
2813 struct skb_data *entry = (struct skb_data *)skb->cb;
2814 struct lan78xx_net *dev = entry->dev;
2815 int urb_status = urb->status;
2816 enum skb_state state;
/* Account the bytes the controller actually transferred. */
2818 skb_put(skb, urb->actual_length);
2822 switch (urb_status) {
2824 if (skb->len < dev->net->hard_header_len) {
2826 dev->net->stats.rx_errors++;
2827 dev->net->stats.rx_length_errors++;
2828 netif_dbg(dev, rx_err, dev->net,
2829 "rx length %d\n", skb->len);
2831 usb_mark_last_busy(dev->udev);
2834 dev->net->stats.rx_errors++;
2835 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2837 case -ECONNRESET: /* async unlink */
2838 case -ESHUTDOWN: /* hardware gone */
2839 netif_dbg(dev, ifdown, dev->net,
2840 "rx shutdown, code %d\n", urb_status);
2848 dev->net->stats.rx_errors++;
2854 /* data overrun ... flush fifo? */
2856 dev->net->stats.rx_over_errors++;
2861 dev->net->stats.rx_errors++;
2862 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
/* defer_bh() moves the skb to dev->done and reports its prior state. */
2866 state = defer_bh(dev, skb, &dev->rxq, state);
2869 if (netif_running(dev->net) &&
2870 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2871 state != unlink_start) {
2872 rx_submit(dev, urb, GFP_ATOMIC);
2877 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: drain dev->txq_pend, coalesce as many pending frames
 * as fit under MAX_SINGLE_PACKET_SIZE into one bulk-OUT transfer (GSO
 * frames go out alone), then allocate and submit the URB.  Handles
 * autosuspend (deferring the URB on dev->deferred while asleep), queue
 * flow control, and -EPIPE halt recovery.
 * NOTE(review): many lines (braces, gotos, error labels) are elided in
 * this extraction; comments describe only what is visible.
 */
2880 static void lan78xx_tx_bh(struct lan78xx_net *dev)
2883 struct urb *urb = NULL;
2884 struct skb_data *entry;
2885 unsigned long flags;
2886 struct sk_buff_head *tqp = &dev->txq_pend;
2887 struct sk_buff *skb, *skb2;
2890 int skb_totallen, pkt_cnt;
/* Pass 1 (under the pend-queue lock): decide how many packets to
 * batch and the total buffer length needed.
 */
2896 spin_lock_irqsave(&tqp->lock, flags);
2897 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2898 if (skb_is_gso(skb)) {
2900 /* handle previous packets first */
/* A GSO skb is sent on its own; unlink it and skip the copy loop. */
2904 length = skb->len - TX_OVERHEAD;
2905 __skb_unlink(skb, tqp);
2906 spin_unlock_irqrestore(&tqp->lock, flags);
2910 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
/* Each packet is placed at a 4-byte-aligned offset in the batch. */
2912 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2915 spin_unlock_irqrestore(&tqp->lock, flags);
2917 /* copy to a single skb */
2918 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2922 skb_put(skb, skb_totallen);
/* Pass 2: dequeue the counted packets and pack them contiguously. */
2924 for (count = pos = 0; count < pkt_cnt; count++) {
2925 skb2 = skb_dequeue(tqp);
2927 length += (skb2->len - TX_OVERHEAD);
2928 memcpy(skb->data + pos, skb2->data, skb2->len);
2929 pos += roundup(skb2->len, sizeof(u32));
2930 dev_kfree_skb(skb2);
2935 urb = usb_alloc_urb(0, GFP_ATOMIC);
2939 entry = (struct skb_data *)skb->cb;
2942 entry->length = length;
2943 entry->num_of_packet = count;
2945 spin_lock_irqsave(&dev->txq.lock, flags);
2946 ret = usb_autopm_get_interface_async(dev->intf);
2948 spin_unlock_irqrestore(&dev->txq.lock, flags);
2952 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
2953 skb->data, skb->len, tx_complete, skb);
2955 if (length % dev->maxpacket == 0) {
2956 /* send USB_ZERO_PACKET */
2957 urb->transfer_flags |= URB_ZERO_PACKET;
2961 /* if this triggers the device is still a sleep */
2962 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2963 /* transmission will be done in resume */
2964 usb_anchor_urb(urb, &dev->deferred);
2965 /* no use to process more packets */
2966 netif_stop_queue(dev->net);
2968 spin_unlock_irqrestore(&dev->txq.lock, flags);
2969 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
2974 ret = usb_submit_urb(urb, GFP_ATOMIC);
2977 netif_trans_update(dev->net);
2978 lan78xx_queue_skb(&dev->txq, skb, tx_start);
/* Flow control: stop the stack once the in-flight queue is full. */
2979 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
2980 netif_stop_queue(dev->net);
2983 netif_stop_queue(dev->net);
2984 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2985 usb_autopm_put_interface_async(dev->intf);
2988 usb_autopm_put_interface_async(dev->intf);
2989 netif_dbg(dev, tx_err, dev->net,
2990 "tx: submit urb err %d\n", ret);
2994 spin_unlock_irqrestore(&dev->txq.lock, flags);
2997 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
2999 dev->net->stats.tx_dropped++;
3001 dev_kfree_skb_any(skb);
3004 netif_dbg(dev, tx_queued, dev->net,
3005 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the in-flight RX URB queue (up to 10 new URBs
 * per pass, bounded by dev->rx_qlen).  If the queue is still short
 * afterwards, reschedule the tasklet; also wake the TX queue when there
 * is room again.
 */
3008 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3013 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3014 for (i = 0; i < 10; i++) {
3015 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3017 urb = usb_alloc_urb(0, GFP_ATOMIC);
/* -ENOLINK from rx_submit means the device went away: stop refilling. */
3019 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3023 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3024 tasklet_schedule(&dev->bh);
3026 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3027 netif_wake_queue(dev->net);
/* Main driver tasklet.  Drains dev->done: rx_done skbs are parsed and
 * delivered (then their URBs freed on a later cleanup state); cleanup
 * states free the URB.  Afterwards, while the device is up, it nudges
 * the stat-update timer back to the base interval, kicks the TX bottom
 * half if packets are pending, and refills RX unless throttled or
 * halted.
 * NOTE(review): case labels and the calls to lan78xx_tx_bh/rx_bh are
 * elided in this extraction.
 */
3030 static void lan78xx_bh(unsigned long param)
3032 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3033 struct sk_buff *skb;
3034 struct skb_data *entry;
3036 while ((skb = skb_dequeue(&dev->done))) {
3037 entry = (struct skb_data *)(skb->cb);
3038 switch (entry->state) {
3040 entry->state = rx_cleanup;
3041 rx_process(dev, skb);
3044 usb_free_urb(entry->urb);
3048 usb_free_urb(entry->urb);
3052 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3057 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3058 /* reset update timer delta */
3059 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3061 mod_timer(&dev->stat_monitor,
3062 jiffies + STAT_UPDATE_TIMER);
3065 if (!skb_queue_empty(&dev->txq_pend))
3068 if (!timer_pending(&dev->delay) &&
3069 !test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred-event worker (dev->wq).  Services the EVENT_* flags set by
 * interrupt/completion context: clears TX/RX endpoint halts (unlinking
 * in-flight URBs first, under autopm), performs link reset, and runs
 * the periodic statistics update with exponential back-off of the timer
 * delta.
 */
3074 static void lan78xx_delayedwork(struct work_struct *work)
3077 struct lan78xx_net *dev;
3079 dev = container_of(work, struct lan78xx_net, wq.work);
3081 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3082 unlink_urbs(dev, &dev->txq);
3083 status = usb_autopm_get_interface(dev->intf);
3086 status = usb_clear_halt(dev->udev, dev->pipe_out);
3087 usb_autopm_put_interface(dev->intf);
3090 status != -ESHUTDOWN) {
3091 if (netif_msg_tx_err(dev))
3093 netdev_err(dev->net,
3094 "can't clear tx halt, status %d\n",
3097 clear_bit(EVENT_TX_HALT, &dev->flags);
3098 if (status != -ESHUTDOWN)
3099 netif_wake_queue(dev->net);
3102 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3103 unlink_urbs(dev, &dev->rxq);
3104 status = usb_autopm_get_interface(dev->intf);
3107 status = usb_clear_halt(dev->udev, dev->pipe_in);
3108 usb_autopm_put_interface(dev->intf);
3111 status != -ESHUTDOWN) {
3112 if (netif_msg_rx_err(dev))
3114 netdev_err(dev->net,
3115 "can't clear rx halt, status %d\n",
3118 clear_bit(EVENT_RX_HALT, &dev->flags);
/* RX halt cleared: let the tasklet refill the RX queue. */
3119 tasklet_schedule(&dev->bh);
3123 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3126 clear_bit(EVENT_LINK_RESET, &dev->flags);
3127 status = usb_autopm_get_interface(dev->intf);
3130 if (lan78xx_link_reset(dev) < 0) {
3131 usb_autopm_put_interface(dev->intf);
3133 netdev_info(dev->net, "link reset failed (%d)\n",
3136 usb_autopm_put_interface(dev->intf);
3140 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3141 lan78xx_update_stats(dev);
3143 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3145 mod_timer(&dev->stat_monitor,
3146 jiffies + (STAT_UPDATE_TIMER * dev->delta));
/* Back off the polling interval geometrically, capped at 50x. */
3148 dev->delta = min((dev->delta * 2), 50);
/* Interrupt-IN URB completion: on success, decode the status words via
 * lan78xx_status(); unlink/shutdown codes just log; other errors are
 * logged without throttling (the endpoint polls infrequently anyway).
 * The URB is zeroed and resubmitted while the interface is running.
 * NOTE(review): the success case label and some break statements are
 * elided in this extraction.
 */
3152 static void intr_complete(struct urb *urb)
3154 struct lan78xx_net *dev = urb->context;
3155 int status = urb->status;
3160 lan78xx_status(dev, urb);
3163 /* software-driven interface shutdown */
3164 case -ENOENT: /* urb killed */
3165 case -ESHUTDOWN: /* hardware gone */
3166 netif_dbg(dev, ifdown, dev->net,
3167 "intr shutdown, code %d\n", status);
3170 /* NOTE: not throttling like RX/TX, since this endpoint
3171 * already polls infrequently
3174 netdev_dbg(dev->net, "intr status %d\n", status);
3178 if (!netif_running(dev->net))
3181 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3182 status = usb_submit_urb(urb, GFP_ATOMIC);
3184 netif_err(dev, timer, dev->net,
3185 "intr resubmit --> %d\n", status);
/* USB disconnect callback: detach intfdata, unregister the netdev,
 * flush the deferred worker and anchored URBs, unbind private state,
 * then kill and free the interrupt URB.
 * NOTE(review): elided lines presumably include the NULL check on @dev,
 * net = dev->net, free_netdev() and usb_put_dev() — confirm upstream.
 */
3188 static void lan78xx_disconnect(struct usb_interface *intf)
3190 struct lan78xx_net *dev;
3191 struct usb_device *udev;
3192 struct net_device *net;
3194 dev = usb_get_intfdata(intf);
3195 usb_set_intfdata(intf, NULL);
3199 udev = interface_to_usbdev(intf);
3202 unregister_netdev(net);
3204 cancel_delayed_work_sync(&dev->wq);
3206 usb_scuttle_anchored_urbs(&dev->deferred);
3208 lan78xx_unbind(dev, intf);
3210 usb_kill_urb(dev->urb_intr);
3211 usb_free_urb(dev->urb_intr);
/* netdev watchdog callback (fires after TX_TIMEOUT_JIFFIES of stalled
 * TX): unlink all in-flight TX URBs and let the tasklet restart the
 * queues.
 */
3217 static void lan78xx_tx_timeout(struct net_device *net)
3219 struct lan78xx_net *dev = netdev_priv(net);
3221 unlink_urbs(dev, &dev->txq);
3222 tasklet_schedule(&dev->bh);
/* ndo_features_check: per-skb feature trimming.  Frames that would
 * exceed the device's single-transfer limit (including TX command
 * overhead) cannot be hardware-segmented, so strip GSO for them; then
 * apply the generic VLAN and VXLAN feature checks.
 */
3225 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3226 struct net_device *netdev,
3227 netdev_features_t features)
3229 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3230 features &= ~NETIF_F_GSO_MASK;
3232 features = vlan_features_check(skb, features);
3233 features = vxlan_features_check(skb, features);
/* net_device_ops vtable wiring the stack's callbacks to this driver's
 * open/stop/xmit/MTU/MAC/ioctl/rx-mode/features/VLAN handlers.
 */
3238 static const struct net_device_ops lan78xx_netdev_ops = {
3239 .ndo_open = lan78xx_open,
3240 .ndo_stop = lan78xx_stop,
3241 .ndo_start_xmit = lan78xx_start_xmit,
3242 .ndo_tx_timeout = lan78xx_tx_timeout,
3243 .ndo_change_mtu = lan78xx_change_mtu,
3244 .ndo_set_mac_address = lan78xx_set_mac_addr,
3245 .ndo_validate_addr = eth_validate_addr,
3246 .ndo_do_ioctl = lan78xx_ioctl,
3247 .ndo_set_rx_mode = lan78xx_set_multicast,
3248 .ndo_set_features = lan78xx_set_features,
3249 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3250 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3251 .ndo_features_check = lan78xx_features_check,
/* Timer callback for dev->stat_monitor: statistics are read over USB,
 * which cannot be done in timer (atomic) context, so just flag
 * EVENT_STAT_UPDATE for the delayed worker.
 */
3254 static void lan78xx_stat_monitor(unsigned long param)
3256 struct lan78xx_net *dev;
3258 dev = (struct lan78xx_net *)param;
3260 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* USB probe: allocate the netdev, initialize queues/locks/tasklet/
 * worker/timer, validate the three expected endpoints (bulk-IN,
 * bulk-OUT, interrupt-IN), bind the hardware, set up the interrupt URB,
 * reject zero-maxpacket descriptors, register the netdev and enable
 * wakeup/autosuspend.
 * NOTE(review): error-path labels and some cleanup gotos are elided in
 * this extraction; only the two visible unwind steps are shown at the
 * bottom.
 */
3263 static int lan78xx_probe(struct usb_interface *intf,
3264 const struct usb_device_id *id)
3266 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3267 struct lan78xx_net *dev;
3268 struct net_device *netdev;
3269 struct usb_device *udev;
3275 udev = interface_to_usbdev(intf);
/* Take a reference on the usb_device for the driver's lifetime. */
3276 udev = usb_get_dev(udev);
3279 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3281 dev_err(&intf->dev, "Error: OOM\n");
3285 /* netdev_printk() needs this */
3286 SET_NETDEV_DEV(netdev, &intf->dev);
3288 dev = netdev_priv(netdev);
3292 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3293 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3295 skb_queue_head_init(&dev->rxq);
3296 skb_queue_head_init(&dev->txq);
3297 skb_queue_head_init(&dev->done);
3298 skb_queue_head_init(&dev->rxq_pause);
3299 skb_queue_head_init(&dev->txq_pend);
3300 mutex_init(&dev->phy_mutex);
3302 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3303 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3304 init_usb_anchor(&dev->deferred);
3306 netdev->netdev_ops = &lan78xx_netdev_ops;
3307 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3308 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3310 dev->stat_monitor.function = lan78xx_stat_monitor;
3311 dev->stat_monitor.data = (unsigned long)dev;
3313 init_timer(&dev->stat_monitor);
3315 mutex_init(&dev->stats.access_lock);
/* Validate endpoint layout before touching the pipes. */
3317 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3322 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3323 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3324 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3329 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3330 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3331 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3336 ep_intr = &intf->cur_altsetting->endpoint[2];
3337 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3342 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3343 usb_endpoint_num(&ep_intr->desc));
3345 ret = lan78xx_bind(dev, intf);
3348 strcpy(netdev->name, "eth%d");
/* Clamp MTU so a full frame always fits in the hardware limit. */
3350 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3351 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3352 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3354 period = ep_intr->desc.bInterval;
3355 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3356 buf = kmalloc(maxp, GFP_KERNEL);
3358 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3359 if (!dev->urb_intr) {
3364 usb_fill_int_urb(dev->urb_intr, dev->udev,
3365 dev->pipe_intr, buf, maxp,
3366 intr_complete, dev, period);
/* buf is owned by the URB from here; freed with it. */
3367 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3371 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3373 /* Reject broken descriptors. */
3374 if (dev->maxpacket == 0) {
3379 /* driver requires remote-wakeup capability during autosuspend. */
3380 intf->needs_remote_wakeup = 1;
3382 ret = register_netdev(netdev);
3384 netif_err(dev, probe, netdev, "couldn't register the device\n");
3388 usb_set_intfdata(intf, dev);
3390 ret = device_set_wakeup_enable(&udev->dev, true);
3392 /* Default delay of 2sec has more overhead than advantage.
3393 * Set to 10sec as default.
3395 pm_runtime_set_autosuspend_delay(&udev->dev,
3396 DEFAULT_AUTOSUSPEND_DELAY);
/* Error unwind (labels elided): unbind, then free the netdev. */
3401 lan78xx_unbind(dev, intf);
3403 free_netdev(netdev);
/* Bitwise CRC-16 (polynomial 0x8005) over @buf, used to program the
 * hardware's wake-frame filters (WUF_CFG CRC16 field).
 * NOTE(review): loop body lines (the shift and XOR against crc16poly)
 * are elided in this extraction.
 */
3410 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3412 const u16 crc16poly = 0x8005;
3418 for (i = 0; i < len; i++) {
3420 for (bit = 0; bit < 8; bit++) {
3424 if (msb ^ (u16)(data & 1)) {
3426 crc |= (u16)0x0001U;
/* Program hardware Wake-on-LAN for suspend according to the @wol
 * bitmask (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP).  Sequence: stop MAC
 * TX/RX, clear wake status registers, install per-type WUF_CFG/WUF_MASK
 * frame filters (IPv4/IPv6 multicast prefixes, ARP ethertype), then
 * write WUCSR and PMT_CTL and re-enable the receiver so wake frames can
 * be seen.  Register write return codes are collected in @ret but the
 * visible lines do not check them individually.
 */
3435 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3443 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3444 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3445 const u8 arp_type[2] = { 0x08, 0x06 };
/* Quiesce the MAC before reprogramming wake logic. */
3447 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3448 buf &= ~MAC_TX_TXEN_;
3449 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3450 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3451 buf &= ~MAC_RX_RXEN_;
3452 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3454 ret = lan78xx_write_reg(dev, WUCSR, 0);
3455 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3456 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3461 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3462 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3463 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* Start from a clean slate of wake-frame filters. */
3465 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3466 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3469 if (wol & WAKE_PHY) {
3470 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3472 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3473 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3474 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3476 if (wol & WAKE_MAGIC) {
3477 temp_wucsr |= WUCSR_MPEN_;
3479 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3480 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3481 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3483 if (wol & WAKE_BCAST) {
3484 temp_wucsr |= WUCSR_BCST_EN_;
3486 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3487 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3488 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3490 if (wol & WAKE_MCAST) {
3491 temp_wucsr |= WUCSR_WAKE_EN_;
3493 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
/* Match the 3-byte 01:00:5E IPv4 multicast MAC prefix (mask 0x7). */
3494 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3495 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3497 WUF_CFGX_TYPE_MCAST_ |
3498 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3499 (crc & WUF_CFGX_CRC16_MASK_));
3501 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3502 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3503 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3504 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3507 /* for IPv6 Multicast */
/* Match the 2-byte 33:33 IPv6 multicast MAC prefix (mask 0x3). */
3508 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3509 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3511 WUF_CFGX_TYPE_MCAST_ |
3512 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3513 (crc & WUF_CFGX_CRC16_MASK_));
3515 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3516 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3517 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3518 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3521 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3522 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3523 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3525 if (wol & WAKE_UCAST) {
3526 temp_wucsr |= WUCSR_PFDA_EN_;
3528 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3529 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3530 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3532 if (wol & WAKE_ARP) {
3533 temp_wucsr |= WUCSR_WAKE_EN_;
3535 /* set WUF_CFG & WUF_MASK
3536 * for packettype (offset 12,13) = ARP (0x0806)
/* Mask 0x3000 selects bytes 12-13 of the frame (the ethertype). */
3538 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3539 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3541 WUF_CFGX_TYPE_ALL_ |
3542 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3543 (crc & WUF_CFGX_CRC16_MASK_));
3545 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3546 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3547 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3548 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3551 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3552 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3553 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3556 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3558 /* when multiple WOL bits are set */
3559 if (hweight_long((unsigned long)wol) > 1) {
3560 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3561 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3562 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3564 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* Clear any stale wake-up status bits. */
3567 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3568 buf |= PMT_CTL_WUPS_MASK_;
3569 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Re-enable the receiver so wake frames are actually detected. */
3571 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3572 buf |= MAC_RX_RXEN_;
3573 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* USB suspend callback.  First suspend: refuse autosuspend while TX is
 * pending, mark the device asleep, stop the MAC, detach the netdev and
 * kill all URBs.  Then, with the stat timer stopped: for autosuspend
 * arm good-frame/PHY wake in WUCSR/PMT_CTL and re-enable RX; for system
 * suspend program WoL via lan78xx_set_suspend() using pdata->wol.
 * NOTE(review): branch structure (if netif_running, else paths, return)
 * is partially elided in this extraction.
 */
3578 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3580 struct lan78xx_net *dev = usb_get_intfdata(intf);
3581 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3586 event = message.event;
3588 if (!dev->suspend_count++) {
3589 spin_lock_irq(&dev->txq.lock);
3590 /* don't autosuspend while transmitting */
3591 if ((skb_queue_len(&dev->txq) ||
3592 skb_queue_len(&dev->txq_pend)) &&
3593 PMSG_IS_AUTO(message)) {
3594 spin_unlock_irq(&dev->txq.lock);
3598 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3599 spin_unlock_irq(&dev->txq.lock);
/* Stop the MAC so no traffic races with the teardown below. */
3603 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3604 buf &= ~MAC_TX_TXEN_;
3605 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3606 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3607 buf &= ~MAC_RX_RXEN_;
3608 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3610 /* empty out the rx and queues */
3611 netif_device_detach(dev->net);
3612 lan78xx_terminate_urbs(dev);
3613 usb_kill_urb(dev->urb_intr);
3616 netif_device_attach(dev->net);
3619 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3620 del_timer(&dev->stat_monitor);
3622 if (PMSG_IS_AUTO(message)) {
3623 /* auto suspend (selective suspend) */
3624 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3625 buf &= ~MAC_TX_TXEN_;
3626 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3627 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3628 buf &= ~MAC_RX_RXEN_;
3629 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3631 ret = lan78xx_write_reg(dev, WUCSR, 0);
3632 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3633 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3635 /* set goodframe wakeup */
3636 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3638 buf |= WUCSR_RFE_WAKE_EN_;
3639 buf |= WUCSR_STORE_WAKE_;
3641 ret = lan78xx_write_reg(dev, WUCSR, buf);
3643 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3645 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3646 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3648 buf |= PMT_CTL_PHY_WAKE_EN_;
3649 buf |= PMT_CTL_WOL_EN_;
3650 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3651 buf |= PMT_CTL_SUS_MODE_3_;
3653 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Clear wake-up status before entering suspend mode. */
3655 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3657 buf |= PMT_CTL_WUPS_MASK_;
3659 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* RX stays enabled so good frames can trigger remote wakeup. */
3661 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3662 buf |= MAC_RX_RXEN_;
3663 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* System suspend: use the user-configured WoL settings. */
3665 lan78xx_set_suspend(dev, pdata->wol);
/* USB resume callback.  Restarts the stat timer, and once the last
 * nested suspend is undone: resubmits the interrupt URB, flushes the
 * URBs that lan78xx_tx_bh() anchored on dev->deferred while asleep
 * (dropping their skbs on submit failure), clears EVENT_DEV_ASLEEP and
 * restarts the queues.  Finally clears the wake registers, re-arms the
 * WUCSR/WUCSR2 wake sources, and re-enables MAC TX.
 * NOTE(review): braces and some intermediate lines are elided in this
 * extraction.
 */
3674 static int lan78xx_resume(struct usb_interface *intf)
3676 struct lan78xx_net *dev = usb_get_intfdata(intf);
3677 struct sk_buff *skb;
3682 if (!timer_pending(&dev->stat_monitor)) {
3684 mod_timer(&dev->stat_monitor,
3685 jiffies + STAT_UPDATE_TIMER);
3688 if (!--dev->suspend_count) {
3689 /* resume interrupt URBs */
3690 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3691 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3693 spin_lock_irq(&dev->txq.lock);
/* Replay TX URBs that were deferred while the device slept. */
3694 while ((res = usb_get_from_anchor(&dev->deferred))) {
3695 skb = (struct sk_buff *)res->context;
3696 ret = usb_submit_urb(res, GFP_ATOMIC);
3698 dev_kfree_skb_any(skb);
3700 usb_autopm_put_interface_async(dev->intf);
3702 netif_trans_update(dev->net);
3703 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3707 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3708 spin_unlock_irq(&dev->txq.lock);
3710 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3711 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3712 netif_start_queue(dev->net);
3713 tasklet_schedule(&dev->bh);
/* Clear stale wake status, then re-arm the wake sources. */
3717 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3718 ret = lan78xx_write_reg(dev, WUCSR, 0);
3719 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3721 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3723 WUCSR2_IPV6_TCPSYN_RCD_ |
3724 WUCSR2_IPV4_TCPSYN_RCD_);
3726 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3727 WUCSR_EEE_RX_WAKE_ |
3729 WUCSR_RFE_WAKE_FR_ |
3734 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3735 buf |= MAC_TX_TXEN_;
3736 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* Resume after a bus reset: the device lost its state, so fully
 * re-initialize the PHY before running the normal resume path.
 * NOTE(review): a call to lan78xx_reset() is presumably on an elided
 * line before lan78xx_phy_init() — confirm upstream.
 */
3741 static int lan78xx_reset_resume(struct usb_interface *intf)
3743 struct lan78xx_net *dev = usb_get_intfdata(intf);
3747 lan78xx_phy_init(dev);
3749 return lan78xx_resume(intf);
/* USB VID/PID match table: LAN7800, LAN7850, and the Allied Telesis
 * AT29M2-AF (LAN7800-based) adapter.  Exported for module autoloading.
 */
3752 static const struct usb_device_id products[] = {
3754 /* LAN7800 USB Gigabit Ethernet Device */
3755 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3758 /* LAN7850 USB Gigabit Ethernet Device */
3759 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3762 /* ATM2-AF USB Gigabit Ethernet Device */
3763 USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
3767 MODULE_DEVICE_TABLE(usb, products);
/* usb_driver registration: wires probe/disconnect and the three PM
 * entry points; opts in to autosuspend and disables hub-initiated LPM
 * (link power management interferes with this device).
 */
3769 static struct usb_driver lan78xx_driver = {
3770 .name = DRIVER_NAME,
3771 .id_table = products,
3772 .probe = lan78xx_probe,
3773 .disconnect = lan78xx_disconnect,
3774 .suspend = lan78xx_suspend,
3775 .resume = lan78xx_resume,
3776 .reset_resume = lan78xx_reset_resume,
3777 .supports_autosuspend = 1,
3778 .disable_hub_initiated_lpm = 1,
/* Generates module init/exit that register/unregister the driver. */
3781 module_usb_driver(lan78xx_driver);
3783 MODULE_AUTHOR(DRIVER_AUTHOR);
3784 MODULE_DESCRIPTION(DRIVER_DESC);
3785 MODULE_LICENSE("GPL");