2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <net/vxlan.h>
34 #include <linux/microchipphy.h>
/* driver identification strings (reported via ethtool/modinfo) */
37 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
38 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
39 #define DRIVER_NAME "lan78xx"
40 #define DRIVER_VERSION "1.0.1"
/* timing parameters (jiffies/ms) for TX watchdog, throttling and URB unlink */
42 #define TX_TIMEOUT_JIFFIES (5 * HZ)
43 #define THROTTLE_JIFFIES (HZ / 8)
44 #define UNLINK_TIMEOUT_MS 3
/* cap on queued RX memory: ~60 maximum-size Ethernet frames */
46 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
/* USB bulk packet sizes per bus speed: SuperSpeed / HighSpeed / FullSpeed */
48 #define SS_USB_PKT_SIZE (1024)
49 #define HS_USB_PKT_SIZE (512)
50 #define FS_USB_PKT_SIZE (64)
/* device FIFO sizes and default burst/delay tuning */
52 #define MAX_RX_FIFO_SIZE (12 * 1024)
53 #define MAX_TX_FIFO_SIZE (12 * 1024)
54 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
55 #define DEFAULT_BULK_IN_DELAY (0x0800)
56 #define MAX_SINGLE_PACKET_SIZE (9000)
/* feature defaults applied at probe time */
57 #define DEFAULT_TX_CSUM_ENABLE (true)
58 #define DEFAULT_RX_CSUM_ENABLE (true)
59 #define DEFAULT_TSO_CSUM_ENABLE (true)
60 #define DEFAULT_VLAN_FILTER_ENABLE (true)
/* per-frame TX command header size in bytes */
61 #define TX_OVERHEAD (8)
/* USB IDs and magic numbers used by the ethtool EEPROM/OTP interface */
64 #define LAN78XX_USB_VENDOR_ID (0x0424)
65 #define LAN7800_USB_PRODUCT_ID (0x7800)
66 #define LAN7850_USB_PRODUCT_ID (0x7850)
67 #define LAN78XX_EEPROM_MAGIC (0x78A5)
68 #define LAN78XX_OTP_MAGIC (0x78F3)
/* first-byte indicators that mark a programmed EEPROM / OTP image */
73 #define EEPROM_INDICATOR (0xA5)
74 #define EEPROM_MAC_OFFSET (0x01)
75 #define MAX_EEPROM_SIZE 512
76 #define OTP_INDICATOR_1 (0xF3)
77 #define OTP_INDICATOR_2 (0xF7)
/* every wake-on-LAN trigger the hardware supports */
79 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
80 WAKE_MCAST | WAKE_BCAST | \
81 WAKE_ARP | WAKE_MAGIC)
83 /* USB related defines */
84 #define BULK_IN_PIPE 1
85 #define BULK_OUT_PIPE 2
87 /* default autosuspend delay (mSec)*/
88 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
/* ethtool statistics names; the order must match the field order of
 * struct lan78xx_statstage (consumed word-by-word in lan78xx_get_stats).
 * NOTE(review): several entries appear to be missing from this excerpt
 * (e.g. FCS-error and pause-frame counters) -- verify against the full
 * table before relying on indices here.
 */
90 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
92 "RX Alignment Errors",
95 "RX Undersize Frame Errors",
96 "RX Oversize Frame Errors",
98 "RX Unicast Byte Count",
99 "RX Broadcast Byte Count",
100 "RX Multicast Byte Count",
102 "RX Broadcast Frames",
103 "RX Multicast Frames",
106 "RX 65 - 127 Byte Frames",
107 "RX 128 - 255 Byte Frames",
108 "RX 256 - 511 Bytes Frames",
109 "RX 512 - 1023 Byte Frames",
110 "RX 1024 - 1518 Byte Frames",
111 "RX Greater 1518 Byte Frames",
112 "EEE RX LPI Transitions",
115 "TX Excess Deferral Errors",
118 "TX Single Collisions",
119 "TX Multiple Collisions",
120 "TX Excessive Collision",
121 "TX Late Collisions",
122 "TX Unicast Byte Count",
123 "TX Broadcast Byte Count",
124 "TX Multicast Byte Count",
126 "TX Broadcast Frames",
127 "TX Multicast Frames",
130 "TX 65 - 127 Byte Frames",
131 "TX 128 - 255 Byte Frames",
132 "TX 256 - 511 Bytes Frames",
133 "TX 512 - 1023 Byte Frames",
134 "TX 1024 - 1518 Byte Frames",
135 "TX Greater 1518 Byte Frames",
136 "EEE TX LPI Transitions",
/* Hardware statistics block as returned by USB_VENDOR_REQUEST_GET_STATS.
 * Counters arrive little-endian and are byte-swapped in
 * lan78xx_read_stats; field order mirrors lan78xx_gstrings.
 */
140 struct lan78xx_statstage {
142 u32 rx_alignment_errors;
143 u32 rx_fragment_errors;
144 u32 rx_jabber_errors;
145 u32 rx_undersize_frame_errors;
146 u32 rx_oversize_frame_errors;
147 u32 rx_dropped_frames;
148 u32 rx_unicast_byte_count;
149 u32 rx_broadcast_byte_count;
150 u32 rx_multicast_byte_count;
151 u32 rx_unicast_frames;
152 u32 rx_broadcast_frames;
153 u32 rx_multicast_frames;
155 u32 rx_64_byte_frames;
156 u32 rx_65_127_byte_frames;
157 u32 rx_128_255_byte_frames;
158 u32 rx_256_511_bytes_frames;
159 u32 rx_512_1023_byte_frames;
160 u32 rx_1024_1518_byte_frames;
161 u32 rx_greater_1518_byte_frames;
162 u32 eee_rx_lpi_transitions;
165 u32 tx_excess_deferral_errors;
166 u32 tx_carrier_errors;
167 u32 tx_bad_byte_count;
168 u32 tx_single_collisions;
169 u32 tx_multiple_collisions;
170 u32 tx_excessive_collision;
171 u32 tx_late_collisions;
172 u32 tx_unicast_byte_count;
173 u32 tx_broadcast_byte_count;
174 u32 tx_multicast_byte_count;
175 u32 tx_unicast_frames;
176 u32 tx_broadcast_frames;
177 u32 tx_multicast_frames;
179 u32 tx_64_byte_frames;
180 u32 tx_65_127_byte_frames;
181 u32 tx_128_255_byte_frames;
182 u32 tx_256_511_bytes_frames;
183 u32 tx_512_1023_byte_frames;
184 u32 tx_1024_1518_byte_frames;
185 u32 tx_greater_1518_byte_frames;
186 u32 eee_tx_lpi_transitions;
/* Per-device private state hung off lan78xx_net->data[0]: receive-filter
 * shadow tables plus the deferred work items that write them to hardware.
 */
192 struct lan78xx_priv {
193 struct lan78xx_net *dev;
195 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
196 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
197 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
198 struct mutex dataport_mutex; /* for dataport access */
199 spinlock_t rfe_ctl_lock; /* for rfe register access */
200 struct work_struct set_multicast;
201 struct work_struct set_vlan;
/* per-URB bookkeeping stored in skb->cb */
215 struct skb_data { /* skb->cb is one of these */
217 struct lan78xx_net *dev;
218 enum skb_state state;
/* context for deferred USB control requests (struct header not visible
 * in this excerpt) */
223 struct usb_ctrlrequest req;
224 struct lan78xx_net *dev;
/* bit numbers for lan78xx_net->flags, serviced by the kevent work queue */
227 #define EVENT_TX_HALT 0
228 #define EVENT_RX_HALT 1
229 #define EVENT_RX_MEMORY 2
230 #define EVENT_STS_SPLIT 3
231 #define EVENT_LINK_RESET 4
232 #define EVENT_RX_PAUSED 5
233 #define EVENT_DEV_WAKING 6
234 #define EVENT_DEV_ASLEEP 7
235 #define EVENT_DEV_OPEN 8
/* Main per-device structure (struct lan78xx_net; opening line not visible
 * in this excerpt).  Holds the netdev/USB handles, the RX/TX skb queues,
 * deferred-work machinery and PHY/MDIO state.
 */
238 struct net_device *net;
239 struct usb_device *udev;
240 struct usb_interface *intf;
/* skb queues: in-flight RX/TX, completed, paused RX, pending TX */
245 struct sk_buff_head rxq;
246 struct sk_buff_head txq;
247 struct sk_buff_head done;
248 struct sk_buff_head rxq_pause;
249 struct sk_buff_head txq_pend;
251 struct tasklet_struct bh;
252 struct delayed_work wq;
256 struct urb *urb_intr;
257 struct usb_anchor deferred;
259 struct mutex phy_mutex; /* for phy access */
260 unsigned pipe_in, pipe_out, pipe_intr;
262 u32 hard_mtu; /* count any extra framing */
263 size_t rx_urb_size; /* size for rx urbs */
267 wait_queue_head_t *wait;
268 unsigned char suspend_count;
271 struct timer_list delay;
/* EVENT_* bits live in flags within this array (see defines above) */
273 unsigned long data[5];
279 struct mii_bus *mdiobus;
282 /* use ethtool to change the level for any given device */
283 static int msg_level = -1;
284 module_param(msg_level, int, 0);
285 MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read a 32-bit device register via a vendor control-IN transfer.
 * NOTE(review): lines missing from this excerpt (buf NULL check,
 * endianness fixup into *data, kfree, return) -- verify against the
 * complete source before modifying.
 */
287 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
289 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
295 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
296 USB_VENDOR_REQUEST_READ_REGISTER,
297 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
298 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
299 if (likely(ret >= 0)) {
303 netdev_warn(dev->net,
304 "Failed to read register index 0x%08x. ret = %d",
/* Write a 32-bit device register via a vendor control-OUT transfer. */
313 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
315 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
324 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
325 USB_VENDOR_REQUEST_WRITE_REGISTER,
326 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
327 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
328 if (unlikely(ret < 0)) {
329 netdev_warn(dev->net,
330 "Failed to write register index 0x%08x. ret = %d",
/* Fetch the whole hardware statistics block in one control transfer
 * and byte-swap it into *data.
 */
339 static int lan78xx_read_stats(struct lan78xx_net *dev,
340 struct lan78xx_statstage *data)
344 struct lan78xx_statstage *stats;
348 stats = kmalloc(sizeof(*stats), GFP_KERNEL)
352 ret = usb_control_msg(dev->udev,
353 usb_rcvctrlpipe(dev->udev, 0),
354 USB_VENDOR_REQUEST_GET_STATS,
355 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
360 USB_CTRL_SET_TIMEOUT);
361 if (likely(ret >= 0)) {
/* counters arrive little-endian; convert each word in place */
364 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
365 le32_to_cpus(&src[i]);
369 netdev_warn(dev->net,
370 "Failed to read stat ret = %d", ret);
378 /* Loop until the read is completed with timeout called with phy_mutex held */
379 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
381 unsigned long start_time = jiffies;
386 ret = lan78xx_read_reg(dev, MII_ACC, &val);
387 if (unlikely(ret < 0))
/* MII engine idle -> done; otherwise poll for up to one second */
390 if (!(val & MII_ACC_MII_BUSY_))
392 } while (!time_after(jiffies, start_time + HZ))
397 static inline u32 mii_access(int id, int index, int read)
401 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
402 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
404 ret |= MII_ACC_MII_READ_;
406 ret |= MII_ACC_MII_WRITE_;
407 ret |= MII_ACC_MII_BUSY_;
/* Poll E2P_CMD until the current EEPROM operation completes or one
 * second elapses; reports a timeout via netdev_warn.
 */
412 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
414 unsigned long start_time = jiffies;
419 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
420 if (unlikely(ret < 0))
423 if (!(val & E2P_CMD_EPC_BUSY_) ||
424 (val & E2P_CMD_EPC_TIMEOUT_))
426 usleep_range(40, 100);
427 } while (!time_after(jiffies, start_time + HZ));
429 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
430 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Confirm the EEPROM controller is idle before starting a new command. */
437 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
439 unsigned long start_time = jiffies;
444 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
445 if (unlikely(ret < 0))
448 if (!(val & E2P_CMD_EPC_BUSY_))
451 usleep_range(40, 100);
452 } while (!time_after(jiffies, start_time + HZ));
454 netdev_warn(dev->net, "EEPROM is busy");
/* Read @length bytes from the EEPROM one byte per READ command.
 * NOTE(review): the per-iteration address advance (offset increment) is
 * not visible in this excerpt -- confirm against the complete source.
 */
458 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
459 u32 length, u8 *data)
464 ret = lan78xx_eeprom_confirm_not_busy(dev);
468 for (i = 0; i < length; i++) {
469 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
470 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
471 ret = lan78xx_write_reg(dev, E2P_CMD, val);
472 if (unlikely(ret < 0))
475 ret = lan78xx_wait_eeprom(dev);
479 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
480 if (unlikely(ret < 0))
/* only the low byte of E2P_DATA is valid */
483 data[i] = val & 0xFF;
490 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
491 u32 length, u8 *data)
496 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
497 if ((ret == 0) && (sig == EEPROM_INDICATOR))
498 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write @length bytes to the EEPROM: enable write/erase once, then issue
 * one WRITE command per byte, waiting for completion each time.
 */
505 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
506 u32 length, u8 *data)
511 ret = lan78xx_eeprom_confirm_not_busy(dev);
515 /* Issue write/erase enable command */
516 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
517 ret = lan78xx_write_reg(dev, E2P_CMD, val);
518 if (unlikely(ret < 0))
521 ret = lan78xx_wait_eeprom(dev);
525 for (i = 0; i < length; i++) {
526 /* Fill data register */
528 ret = lan78xx_write_reg(dev, E2P_DATA, val);
532 /* Send "write" command */
533 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
534 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
535 ret = lan78xx_write_reg(dev, E2P_CMD, val);
539 ret = lan78xx_wait_eeprom(dev);
/* Read @length bytes of raw OTP memory.  Powers the OTP block up first
 * if it is in power-down, then reads one byte per READ command.
 */
549 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
550 u32 length, u8 *data)
555 unsigned long timeout;
557 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
559 if (buf & OTP_PWR_DN_PWRDN_N_) {
560 /* clear it and wait to be cleared */
561 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
563 timeout = jiffies + HZ;
566 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
567 if (time_after(jiffies, timeout)) {
568 netdev_warn(dev->net,
569 "timeout on OTP_PWR_DN");
572 } while (buf & OTP_PWR_DN_PWRDN_N_);
575 for (i = 0; i < length; i++) {
/* OTP address is split: high bits in ADDR1, low bits in ADDR2 */
576 ret = lan78xx_write_reg(dev, OTP_ADDR1,
577 ((offset + i) >> 8) & OTP_ADDR1_15_11);
578 ret = lan78xx_write_reg(dev, OTP_ADDR2,
579 ((offset + i) & OTP_ADDR2_10_3));
581 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
582 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
584 timeout = jiffies + HZ;
587 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
588 if (time_after(jiffies, timeout)) {
589 netdev_warn(dev->net,
590 "timeout on OTP_STATUS");
593 } while (buf & OTP_STATUS_BUSY_);
595 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
597 data[i] = (u8)(buf & 0xFF);
/* Read OTP contents after validating the indicator byte.
 * NOTE(review): the offset adjustment for OTP_INDICATOR_2 images and the
 * invalid-indicator error path are not visible in this excerpt.
 */
603 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
604 u32 length, u8 *data)
609 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
612 if (sig == OTP_INDICATOR_1)
614 else if (sig == OTP_INDICATOR_2)
619 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL (up to 100 iterations) until the dataport is ready. */
625 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
629 for (i = 0; i < 100; i++) {
632 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
633 if (unlikely(ret < 0))
636 if (dp_sel & DP_SEL_DPRDY_)
639 usleep_range(40, 100);
642 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write @length words into internal dataport RAM selected by @ram_select,
 * starting at @addr.  Serialized by pdata->dataport_mutex and bracketed
 * by USB autopm get/put so the device stays resumed.
 */
647 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
648 u32 addr, u32 length, u32 *buf)
650 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
654 if (usb_autopm_get_interface(dev->intf) < 0)
657 mutex_lock(&pdata->dataport_mutex);
659 ret = lan78xx_dataport_wait_not_busy(dev);
663 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
665 dp_sel &= ~DP_SEL_RSEL_MASK_;
666 dp_sel |= ram_select;
667 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
669 for (i = 0; i < length; i++) {
670 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
672 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
674 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
676 ret = lan78xx_dataport_wait_not_busy(dev);
682 mutex_unlock(&pdata->dataport_mutex);
683 usb_autopm_put_interface(dev->intf);
/* Pack a MAC address into the shadow perfect-filter table entry @index.
 * Index 0 is reserved for the device's own address, hence index > 0.
 * NOTE(review): the first byte-packing line (addr[3] into temp) is not
 * visible in this excerpt.
 */
688 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
689 int index, u8 addr[ETH_ALEN])
693 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
695 temp = addr[2] | (temp << 8);
696 temp = addr[1] | (temp << 8);
697 temp = addr[0] | (temp << 8);
698 pdata->pfilter_table[index][1] = temp;
700 temp = addr[4] | (temp << 8);
701 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
702 pdata->pfilter_table[index][0] = temp;
706 /* returns hash bit number for given MAC address */
707 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
709 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler: push the shadow multicast hash table and perfect-filter
 * table to the hardware.  Runs in process context because the register
 * writes sleep (USB control transfers).
 */
712 static void lan78xx_deferred_multicast_write(struct work_struct *param)
714 struct lan78xx_priv *pdata =
715 container_of(param, struct lan78xx_priv, set_multicast);
716 struct lan78xx_net *dev = pdata->dev;
720 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
/* hash table lives in VLAN/DA RAM just past the VLAN table */
723 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
724 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
/* clear MAF_HI first so the entry is invalid while MAF_LO changes */
726 for (i = 1; i < NUM_OF_MAF; i++) {
727 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
728 ret = lan78xx_write_reg(dev, MAF_LO(i),
729 pdata->pfilter_table[i][1]);
730 ret = lan78xx_write_reg(dev, MAF_HI(i),
731 pdata->pfilter_table[i][0]);
734 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode: rebuild the shadow receive-filter state under the
 * rfe_ctl spinlock (this path cannot sleep), then defer the actual
 * register writes to the set_multicast work item.
 */
737 static void lan78xx_set_multicast(struct net_device *netdev)
739 struct lan78xx_net *dev = netdev_priv(netdev);
740 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
744 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
746 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
747 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
749 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
750 pdata->mchash_table[i] = 0;
751 /* pfilter_table[0] has own HW address */
752 for (i = 1; i < NUM_OF_MAF; i++) {
753 pdata->pfilter_table[i][0] =
754 pdata->pfilter_table[i][1] = 0;
/* broadcast is always accepted */
757 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
759 if (dev->net->flags & IFF_PROMISC) {
760 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
761 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
763 if (dev->net->flags & IFF_ALLMULTI) {
764 netif_dbg(dev, drv, dev->net,
765 "receive all multicast enabled");
766 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
770 if (netdev_mc_count(dev->net)) {
771 struct netdev_hw_addr *ha;
774 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
776 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
/* first addresses go into perfect filters, overflow into the hash */
779 netdev_for_each_mc_addr(ha, netdev) {
780 /* set first 32 into Perfect Filter */
782 lan78xx_set_addr_filter(pdata, i, ha->addr);
784 u32 bitnum = lan78xx_hash(ha->addr);
786 pdata->mchash_table[bitnum / 32] |=
787 (1 << (bitnum % 32));
788 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
794 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
796 /* defer register writes to a sleepable context */
797 schedule_work(&pdata->set_multicast);
/* Program MAC flow control from the resolved autonegotiation result.
 * NOTE(review): the fct_flow threshold values for SuperSpeed/HighSpeed
 * are on lines missing from this excerpt -- verify before modifying.
 */
800 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
801 u16 lcladv, u16 rmtadv)
803 u32 flow = 0, fct_flow = 0;
806 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
/* TX pause enable carries the pause-time in the low 16 bits */
808 if (cap & FLOW_CTRL_TX)
809 flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
811 if (cap & FLOW_CTRL_RX)
812 flow |= FLOW_CR_RX_FCEN_;
814 if (dev->udev->speed == USB_SPEED_SUPER)
816 else if (dev->udev->speed == USB_SPEED_HIGH)
819 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
820 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
821 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
823 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
825 /* threshold value should be set before enabling flow */
826 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link-state change (EVENT_LINK_RESET): clear interrupt
 * status, update carrier, and on link-up reconfigure USB power-saving
 * (U1/U2) and flow control to match the negotiated speed/duplex.
 */
831 static int lan78xx_link_reset(struct lan78xx_net *dev)
833 struct phy_device *phydev = dev->net->phydev;
834 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
838 /* clear PHY interrupt status */
839 ret = phy_read(phydev, LAN88XX_INT_STS);
840 if (unlikely(ret < 0))
843 /* clear LAN78xx interrupt status */
844 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
845 if (unlikely(ret < 0))
848 phy_read_status(phydev);
850 if (!phydev->link && dev->link_on) {
851 dev->link_on = false;
852 netif_carrier_off(dev->net);
/* NOTE(review): the MAC_CR reset bits applied between this read and
 * write are on lines missing from this excerpt.
 */
855 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
856 if (unlikely(ret < 0))
859 ret = lan78xx_write_reg(dev, MAC_CR, buf);
860 if (unlikely(ret < 0))
862 } else if (phydev->link && !dev->link_on) {
865 phy_ethtool_gset(phydev, &ecmd);
867 ret = phy_read(phydev, LAN88XX_INT_STS);
869 if (dev->udev->speed == USB_SPEED_SUPER) {
870 if (ethtool_cmd_speed(&ecmd) == 1000) {
/* disable U2 at gigabit, enable U1 */
872 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
873 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
874 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
876 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
877 buf |= USB_CFG1_DEV_U1_INIT_EN_;
878 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
/* below gigabit both U1 and U2 may be enabled */
881 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
882 buf |= USB_CFG1_DEV_U2_INIT_EN_;
883 buf |= USB_CFG1_DEV_U1_INIT_EN_;
884 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
888 ladv = phy_read(phydev, MII_ADVERTISE);
892 radv = phy_read(phydev, MII_LPA);
896 netif_dbg(dev, link, dev->net,
897 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
898 ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
900 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
901 netif_carrier_on(dev->net);
903 tasklet_schedule(&dev->bh);
909 /* some work can't be done in tasklets, so we use keventd
911 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
912 * but tasklet_schedule() doesn't. hope the failure is rare.
914 void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
916 set_bit(work, &dev->flags);
917 if (!schedule_delayed_work(&dev->wq, 0))
918 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint URB completion: decode the 4-byte status word and
 * defer link handling to keventd on a PHY interrupt.
 */
921 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
925 if (urb->actual_length != 4) {
926 netdev_warn(dev->net,
927 "unexpected urb length %d", urb->actual_length);
/* status word is little-endian on the wire */
931 memcpy(&intdata, urb->transfer_buffer, 4);
932 le32_to_cpus(&intdata);
934 if (intdata & INT_ENP_PHY_INT) {
935 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
936 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
938 netdev_warn(dev->net,
939 "unexpected interrupt: 0x%08x\n", intdata);
942 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
944 return MAX_EEPROM_SIZE;
947 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
948 struct ethtool_eeprom *ee, u8 *data)
950 struct lan78xx_net *dev = netdev_priv(netdev);
952 ee->magic = LAN78XX_EEPROM_MAGIC;
954 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
/* ethtool set_eeprom: accepts only a full-image update, gated on the
 * matching magic and the correct indicator byte at offset 0.
 */
957 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
958 struct ethtool_eeprom *ee, u8 *data)
960 struct lan78xx_net *dev = netdev_priv(netdev);
962 /* Allow entire eeprom update only */
963 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
966 (data[0] == EEPROM_INDICATOR))
967 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
968 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
971 (data[0] == OTP_INDICATOR_1))
/* NOTE(review): this OTP branch calls lan78xx_write_raw_eeprom(), which
 * writes the EEPROM, not OTP memory.  Looks like it should call an OTP
 * write helper instead -- verify against upstream before changing.
 */
972 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
977 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
980 if (stringset == ETH_SS_STATS)
981 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
984 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
986 if (sset == ETH_SS_STATS)
987 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool get_ethtool_stats: snapshot the hardware counters into the
 * u64 output array (device kept awake via autopm).
 */
992 static void lan78xx_get_stats(struct net_device *netdev,
993 struct ethtool_stats *stats, u64 *data)
995 struct lan78xx_net *dev = netdev_priv(netdev);
996 struct lan78xx_statstage lan78xx_stat;
1000 if (usb_autopm_get_interface(dev->intf) < 0)
1003 if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1004 p = (u32 *)&lan78xx_stat;
1005 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1009 usb_autopm_put_interface(dev->intf);
/* ethtool get_wol: report supported/enabled wake-on-LAN modes based on
 * whether remote wakeup is enabled in USB_CFG0.
 */
1012 static void lan78xx_get_wol(struct net_device *netdev,
1013 struct ethtool_wolinfo *wol)
1015 struct lan78xx_net *dev = netdev_priv(netdev);
1018 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1020 if (usb_autopm_get_interface(dev->intf) < 0)
1023 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1024 if (unlikely(ret < 0)) {
1028 if (buf & USB_CFG_RMT_WKP_) {
1029 wol->supported = WAKE_ALL;
1030 wol->wolopts = pdata->wol;
1037 usb_autopm_put_interface(dev->intf);
/* ethtool set_wol: record the requested modes, arm USB remote wakeup,
 * and forward the request to the PHY.
 */
1040 static int lan78xx_set_wol(struct net_device *netdev,
1041 struct ethtool_wolinfo *wol)
1043 struct lan78xx_net *dev = netdev_priv(netdev);
1044 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1047 ret = usb_autopm_get_interface(dev->intf);
1051 if (wol->wolopts & ~WAKE_ALL)
1054 pdata->wol = wol->wolopts;
1056 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1058 phy_ethtool_set_wol(netdev->phydev, wol);
1060 usb_autopm_put_interface(dev->intf);
/* ethtool get_eee: combine PHY EEE state with the MAC's EEE enable bit
 * and LPI request delay.
 */
1065 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1067 struct lan78xx_net *dev = netdev_priv(net);
1068 struct phy_device *phydev = net->phydev;
1072 ret = usb_autopm_get_interface(dev->intf);
1076 ret = phy_ethtool_get_eee(phydev, edata);
1080 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1081 if (buf & MAC_CR_EEE_EN_) {
1082 edata->eee_enabled = true;
/* EEE is active only if both ends advertised a common mode */
1083 edata->eee_active = !!(edata->advertised &
1084 edata->lp_advertised);
1085 edata->tx_lpi_enabled = true;
1086 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1087 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1088 edata->tx_lpi_timer = buf;
1090 edata->eee_enabled = false;
1091 edata->eee_active = false;
1092 edata->tx_lpi_enabled = false;
1093 edata->tx_lpi_timer = 0;
1098 usb_autopm_put_interface(dev->intf);
/* ethtool set_eee: enable/disable EEE in the MAC and propagate the
 * advertisement and LPI timer to the PHY/MAC.
 */
1103 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1105 struct lan78xx_net *dev = netdev_priv(net);
1109 ret = usb_autopm_get_interface(dev->intf);
1113 if (edata->eee_enabled) {
1114 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1115 buf |= MAC_CR_EEE_EN_;
1116 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1118 phy_ethtool_set_eee(net->phydev, edata);
1120 buf = (u32)edata->tx_lpi_timer;
1121 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1123 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1124 buf &= ~MAC_CR_EEE_EN_;
1125 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1128 usb_autopm_put_interface(dev->intf);
1133 static u32 lan78xx_get_link(struct net_device *net)
1135 phy_read_status(net->phydev);
1137 return net->phydev->link;
1140 int lan78xx_nway_reset(struct net_device *net)
1142 return phy_start_aneg(net->phydev);
1145 static void lan78xx_get_drvinfo(struct net_device *net,
1146 struct ethtool_drvinfo *info)
1148 struct lan78xx_net *dev = netdev_priv(net);
1150 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1151 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1152 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1155 static u32 lan78xx_get_msglevel(struct net_device *net)
1157 struct lan78xx_net *dev = netdev_priv(net);
1159 return dev->msg_enable;
1162 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1164 struct lan78xx_net *dev = netdev_priv(net);
1166 dev->msg_enable = level;
1169 static int lan78xx_get_mdix_status(struct net_device *net)
1171 struct phy_device *phydev = net->phydev;
1174 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1175 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1176 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1181 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1183 struct lan78xx_net *dev = netdev_priv(net);
1184 struct phy_device *phydev = net->phydev;
1187 if (mdix_ctrl == ETH_TP_MDI) {
1188 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1189 LAN88XX_EXT_PAGE_SPACE_1);
1190 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1191 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1192 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1193 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1194 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1195 LAN88XX_EXT_PAGE_SPACE_0);
1196 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1197 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1198 LAN88XX_EXT_PAGE_SPACE_1);
1199 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1200 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1201 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1202 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1203 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1204 LAN88XX_EXT_PAGE_SPACE_0);
1205 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1206 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1207 LAN88XX_EXT_PAGE_SPACE_1);
1208 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1209 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1210 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1211 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1212 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1213 LAN88XX_EXT_PAGE_SPACE_0);
1215 dev->mdix_ctrl = mdix_ctrl;
/* ethtool get_settings: PHY link settings plus the current MDI-X mode
 * decoded from the PHY's extended mode-control register.
 */
1218 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1220 struct lan78xx_net *dev = netdev_priv(net);
1221 struct phy_device *phydev = net->phydev;
1225 ret = usb_autopm_get_interface(dev->intf);
1229 ret = phy_ethtool_gset(phydev, cmd);
1231 buf = lan78xx_get_mdix_status(net);
1233 buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1234 if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1235 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1236 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1237 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1238 cmd->eth_tp_mdix = ETH_TP_MDI;
1239 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1240 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1241 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1242 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1245 usb_autopm_put_interface(dev->intf);
/* ethtool set_settings: apply MDI-X mode if changed, then speed/duplex;
 * for forced (non-autoneg) modes the link is bounced so the partner
 * re-trains at the new settings.
 */
1250 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1252 struct lan78xx_net *dev = netdev_priv(net);
1253 struct phy_device *phydev = net->phydev;
1257 ret = usb_autopm_get_interface(dev->intf);
1261 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1262 lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
1265 /* change speed & duplex */
1266 ret = phy_ethtool_sset(phydev, cmd);
1268 if (!cmd->autoneg) {
1269 /* force link down */
1270 temp = phy_read(phydev, MII_BMCR);
/* loopback bit drops the link; restoring BMCR brings it back up */
1271 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1273 phy_write(phydev, MII_BMCR, temp);
1276 usb_autopm_put_interface(dev->intf);
1281 static const struct ethtool_ops lan78xx_ethtool_ops = {
1282 .get_link = lan78xx_get_link,
1283 .nway_reset = lan78xx_nway_reset,
1284 .get_drvinfo = lan78xx_get_drvinfo,
1285 .get_msglevel = lan78xx_get_msglevel,
1286 .set_msglevel = lan78xx_set_msglevel,
1287 .get_settings = lan78xx_get_settings,
1288 .set_settings = lan78xx_set_settings,
1289 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1290 .get_eeprom = lan78xx_ethtool_get_eeprom,
1291 .set_eeprom = lan78xx_ethtool_set_eeprom,
1292 .get_ethtool_stats = lan78xx_get_stats,
1293 .get_sset_count = lan78xx_get_sset_count,
1294 .get_strings = lan78xx_get_strings,
1295 .get_wol = lan78xx_get_wol,
1296 .set_wol = lan78xx_set_wol,
1297 .get_eee = lan78xx_get_eee,
1298 .set_eee = lan78xx_set_eee,
1301 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1303 if (!netif_running(netdev))
1306 return phy_mii_ioctl(netdev->phydev, rq, cmd);
/* Determine the MAC address at probe: use the one already in RX_ADDRL/H
 * if valid, otherwise try EEPROM then OTP, otherwise generate a random
 * one.  The result is written back to the MAC and perfect filter 0.
 */
1309 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1311 u32 addr_lo, addr_hi;
1315 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1316 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* unpack little-endian register pair into the 6-byte address */
1318 addr[0] = addr_lo & 0xFF;
1319 addr[1] = (addr_lo >> 8) & 0xFF;
1320 addr[2] = (addr_lo >> 16) & 0xFF;
1321 addr[3] = (addr_lo >> 24) & 0xFF;
1322 addr[4] = addr_hi & 0xFF;
1323 addr[5] = (addr_hi >> 8) & 0xFF;
1325 if (!is_valid_ether_addr(addr)) {
1326 /* reading mac address from EEPROM or OTP */
1327 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1329 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1331 if (is_valid_ether_addr(addr)) {
1332 /* eeprom values are valid so use them */
1333 netif_dbg(dev, ifup, dev->net,
1334 "MAC address read from EEPROM");
1336 /* generate random MAC */
1337 random_ether_addr(addr);
1338 netif_dbg(dev, ifup, dev->net,
1339 "MAC address set to random addr");
1342 addr_lo = addr[0] | (addr[1] << 8) |
1343 (addr[2] << 16) | (addr[3] << 24);
1344 addr_hi = addr[4] | (addr[5] << 8);
1346 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1347 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1349 /* generate random MAC */
1350 random_ether_addr(addr);
1351 netif_dbg(dev, ifup, dev->net,
1352 "MAC address set to random addr");
/* perfect filter 0 always holds the device's own address */
1356 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1357 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1359 ether_addr_copy(dev->net->dev_addr, addr);
1362 /* MDIO read and write wrappers for phylib */
/* phylib read hook: indirect PHY register read through MII_ACC/MII_DATA,
 * serialized by phy_mutex and kept awake via autopm.
 */
1363 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1365 struct lan78xx_net *dev = bus->priv;
1369 ret = usb_autopm_get_interface(dev->intf);
1373 mutex_lock(&dev->phy_mutex);
1375 /* confirm MII not busy */
1376 ret = lan78xx_phy_wait_not_busy(dev);
1380 /* set the address, index & direction (read from PHY) */
1381 addr = mii_access(phy_id, idx, MII_READ);
1382 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1384 ret = lan78xx_phy_wait_not_busy(dev);
1388 ret = lan78xx_read_reg(dev, MII_DATA, &val);
/* only the low 16 bits hold the PHY register value */
1390 ret = (int)(val & 0xFFFF);
1393 mutex_unlock(&dev->phy_mutex);
1394 usb_autopm_put_interface(dev->intf);
/* phylib write hook: indirect PHY register write; data goes to MII_DATA
 * before the MII_ACC command starts the cycle.
 */
1398 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1401 struct lan78xx_net *dev = bus->priv;
1405 ret = usb_autopm_get_interface(dev->intf);
1409 mutex_lock(&dev->phy_mutex);
1411 /* confirm MII not busy */
1412 ret = lan78xx_phy_wait_not_busy(dev);
1417 ret = lan78xx_write_reg(dev, MII_DATA, val);
1419 /* set the address, index & direction (write to PHY) */
1420 addr = mii_access(phy_id, idx, MII_WRITE);
1421 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1423 ret = lan78xx_phy_wait_not_busy(dev);
1428 mutex_unlock(&dev->phy_mutex);
1429 usb_autopm_put_interface(dev->intf);
/* Allocate and register the internal MDIO bus, masking the scan to the
 * internal PHY address; PHY interrupts are handled by the driver, so
 * phylib interrupt lines are set to PHY_IGNORE_INTERRUPT.
 */
1433 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1438 dev->mdiobus = mdiobus_alloc();
1439 if (!dev->mdiobus) {
1440 netdev_err(dev->net, "can't allocate MDIO bus\n");
1444 dev->mdiobus->priv = (void *)dev;
1445 dev->mdiobus->read = lan78xx_mdiobus_read;
1446 dev->mdiobus->write = lan78xx_mdiobus_write;
1447 dev->mdiobus->name = "lan78xx-mdiobus";
1448 dev->mdiobus->parent = &dev->udev->dev;
/* unique bus id derived from the USB bus/device numbers */
1450 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1451 dev->udev->bus->busnum, dev->udev->devnum);
1453 dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1454 if (!dev->mdiobus->irq) {
1459 /* handle our own interrupt */
1460 for (i = 0; i < PHY_MAX_ADDR; i++)
1461 dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
1463 switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
1466 /* set to internal PHY id */
1467 dev->mdiobus->phy_mask = ~(1 << 1);
1471 ret = mdiobus_register(dev->mdiobus);
1473 netdev_err(dev->net, "can't register MDIO bus\n");
1477 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* error unwind: free irq table, then the bus itself */
1480 kfree(dev->mdiobus->irq);
1482 mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus created by lan78xx_mdio_init(): unregister,
 * free the irq table, then free the bus structure (reverse order of init).
 */
1486 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1488 mdiobus_unregister(dev->mdiobus);
1489 kfree(dev->mdiobus->irq);
1490 mdiobus_free(dev->mdiobus);
/* phylib link-change callback passed to phy_connect_direct() in
 * lan78xx_phy_init(); body elided in this listing.
 */
1493 static void lan78xx_link_status_change(struct net_device *net)
/* Find the PHY on our MDIO bus, attach it to the net_device, and
 * configure supported link modes. Note the initial phydev value taken
 * from dev->net->phydev is immediately overwritten by phy_find_first().
 */
1498 static int lan78xx_phy_init(struct lan78xx_net *dev)
1501 struct phy_device *phydev = dev->net->phydev;
1503 phydev = phy_find_first(dev->mdiobus);
1505 netdev_err(dev->net, "no PHY found\n");
1509 ret = phy_connect_direct(dev->net, phydev,
1510 lan78xx_link_status_change,
1511 PHY_INTERFACE_MODE_GMII);
1513 netdev_err(dev->net, "can't attach PHY to %s\n",
1518 /* set to AUTOMDIX */
1519 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1521 /* MAC doesn't support 1000T Half */
1522 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1523 phydev->supported |= (SUPPORTED_10baseT_Half |
1524 SUPPORTED_10baseT_Full |
1525 SUPPORTED_100baseT_Half |
1526 SUPPORTED_100baseT_Full |
1527 SUPPORTED_1000baseT_Full |
1528 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1529 genphy_config_aneg(phydev);
1531 /* Workaround to enable PHY interrupt.
1532 * phy_start_interrupts() is API for requesting and enabling
1533 * PHY interrupt. However, USB-to-Ethernet device can't use
1534 * request_irq() called in phy_start_interrupts().
1535 * Set PHY to PHY_HALTED and call phy_start()
1536 * to make a call to phy_enable_interrupts()
1541 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
/* Program the MAC's maximum RX frame length. The receiver is disabled
 * while MAC_RX is updated and re-enabled afterwards; the re-enable at
 * the bottom is presumably conditional on the saved rxenabled state
 * (guard elided in this listing) -- TODO confirm.
 */
1546 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1552 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1554 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
/* temporarily disable RX while the max-size field is rewritten */
1557 buf &= ~MAC_RX_RXEN_;
1558 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1561 /* add 4 to size for FCS */
1562 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1563 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1565 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1568 buf |= MAC_RX_RXEN_;
1569 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every in-flight URB on queue @q.
 * Walks the queue under its lock, marks each entry unlink_start, then
 * drops the lock around usb_unlink_urb() -- the unlink may complete
 * synchronously and the completion handler takes the same lock.
 * Returns the number of URBs unlinked (return elided in this listing).
 */
1575 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1577 struct sk_buff *skb;
1578 unsigned long flags;
1581 spin_lock_irqsave(&q->lock, flags);
1582 while (!skb_queue_empty(q)) {
1583 struct skb_data *entry;
/* find the first entry not already being unlinked */
1587 skb_queue_walk(q, skb) {
1588 entry = (struct skb_data *)skb->cb;
1589 if (entry->state != unlink_start)
1594 entry->state = unlink_start;
1597 /* Get reference count of the URB to avoid it to be
1598 * freed during usb_unlink_urb, which may trigger
1599 * use-after-free problem inside usb_unlink_urb since
1600 * usb_unlink_urb is always racing with .complete
1601 * handler(include defer_bh).
1604 spin_unlock_irqrestore(&q->lock, flags);
1605 /* during some PM-driven resume scenarios,
1606 * these (async) unlinks complete immediately
1608 ret = usb_unlink_urb(urb);
1609 if (ret != -EINPROGRESS && ret != 0)
1610 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
/* re-take the lock and rescan; the queue may have changed meanwhile */
1614 spin_lock_irqsave(&q->lock, flags);
1616 spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: validate the new MTU, program the hardware max frame
 * length, and grow rx_urb_size to match the new hard_mtu when it was
 * previously tracking it. If URBs grew while the interface is running,
 * in-flight RX URBs are unlinked so they get resubmitted at the new size.
 */
1620 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1622 struct lan78xx_net *dev = netdev_priv(netdev);
1623 int ll_mtu = new_mtu + netdev->hard_header_len;
1624 int old_hard_mtu = dev->hard_mtu;
1625 int old_rx_urb_size = dev->rx_urb_size;
1628 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1633 /* no second zero-length packet read wanted after mtu-sized packets */
1634 if ((ll_mtu % dev->maxpacket) == 0)
1637 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1639 netdev->mtu = new_mtu;
1641 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1642 if (dev->rx_urb_size == old_hard_mtu) {
1643 dev->rx_urb_size = dev->hard_mtu;
1644 if (dev->rx_urb_size > old_rx_urb_size) {
1645 if (netif_running(dev->net)) {
1646 unlink_urbs(dev, &dev->rxq);
1647 tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: validate and store a new MAC address, then
 * program it into the RX_ADDRL/RX_ADDRH registers (little-endian byte
 * packing: bytes 0-3 in ADDRL, bytes 4-5 in ADDRH). Rejected while the
 * interface is running.
 */
1655 int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1657 struct lan78xx_net *dev = netdev_priv(netdev);
1658 struct sockaddr *addr = p;
1659 u32 addr_lo, addr_hi;
1662 if (netif_running(netdev))
1665 if (!is_valid_ether_addr(addr->sa_data))
1666 return -EADDRNOTAVAIL;
1668 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1670 addr_lo = netdev->dev_addr[0] |
1671 netdev->dev_addr[1] << 8 |
1672 netdev->dev_addr[2] << 16 |
1673 netdev->dev_addr[3] << 24;
1674 addr_hi = netdev->dev_addr[4] |
1675 netdev->dev_addr[5] << 8;
1677 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1678 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1683 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate NETIF_F_RXCSUM and VLAN-filter feature
 * bits into RFE_CTL bits. The cached pdata->rfe_ctl is updated under
 * rfe_ctl_lock; the register write itself happens after the lock is
 * dropped (lan78xx_write_reg goes over USB).
 */
1684 static int lan78xx_set_features(struct net_device *netdev,
1685 netdev_features_t features)
1687 struct lan78xx_net *dev = netdev_priv(netdev);
1688 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1689 unsigned long flags;
1692 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1694 if (features & NETIF_F_RXCSUM) {
1695 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
1696 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
1698 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
1699 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
1702 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1703 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
1705 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
1707 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1709 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* Workqueue callback: flush the in-memory VLAN filter table to the
 * device's dataport. Runs in process context, where the (sleeping) USB
 * register writes inside lan78xx_dataport_write() are allowed.
 */
1714 static void lan78xx_deferred_vlan_write(struct work_struct *param)
1716 struct lan78xx_priv *pdata =
1717 container_of(param, struct lan78xx_priv, set_vlan);
1718 struct lan78xx_net *dev = pdata->dev;
1720 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1721 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the bit for @vid in the 128x32-bit VLAN
 * filter bitmap (word = vid >> 5, bit = vid & 0x1F) and defer the
 * hardware update to the set_vlan work item (register writes sleep).
 */
1724 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1725 __be16 proto, u16 vid)
1727 struct lan78xx_net *dev = netdev_priv(netdev);
1728 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1730 u16 vid_dword_index;
1732 vid_dword_index = (vid >> 5) & 0x7F;
1733 vid_bit_index = vid & 0x1F;
1735 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1737 /* defer register writes to a sleepable context */
1738 schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the bit for @vid in the VLAN filter
 * bitmap and defer the hardware update, mirroring the add path above.
 */
1743 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1744 __be16 proto, u16 vid)
1746 struct lan78xx_net *dev = netdev_priv(netdev);
1747 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1749 u16 vid_dword_index;
1751 vid_dword_index = (vid >> 5) & 0x7F;
1752 vid_bit_index = vid & 0x1F;
1754 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1756 /* defer register writes to a sleepable context */
1757 schedule_work(&pdata->set_vlan);
/* Initialise USB Latency Tolerance Messaging (LTM) registers. When LTM
 * is enabled in USB_CFG1, the six LTM register values are loaded from
 * EEPROM, falling back to OTP; both sources are validated by checking
 * that the stored length field equals 24 (6 x 4-byte values). The regs[]
 * defaults of 0 are written if neither source provides values.
 */
1762 static void lan78xx_init_ltm(struct lan78xx_net *dev)
1766 u32 regs[6] = { 0 };
1768 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1769 if (buf & USB_CFG1_LTM_ENABLE_) {
1771 /* Get values from EEPROM first */
1772 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1773 if (temp[0] == 24) {
1774 ret = lan78xx_read_raw_eeprom(dev,
1781 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1782 if (temp[0] == 24) {
1783 ret = lan78xx_read_raw_otp(dev,
1793 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1794 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1795 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1796 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1797 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1798 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device bring-up: soft (Lite) reset, MAC address programming,
 * USB/burst configuration sized by link speed, FIFO setup, receive
 * filter defaults, PHY reset, and finally enabling the MAC/FCT TX and
 * RX paths. Called from bind and open. Both reset waits poll with a
 * one-second (HZ) timeout and warn on expiry rather than failing hard.
 */
1801 static int lan78xx_reset(struct lan78xx_net *dev)
1803 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1806 unsigned long timeout;
/* issue a Lite reset and poll until the self-clearing bit drops */
1808 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1809 buf |= HW_CFG_LRST_;
1810 ret = lan78xx_write_reg(dev, HW_CFG, buf);
1812 timeout = jiffies + HZ;
1815 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1816 if (time_after(jiffies, timeout)) {
1817 netdev_warn(dev->net,
1818 "timeout on completion of LiteReset");
1821 } while (buf & HW_CFG_LRST_);
1823 lan78xx_init_mac_address(dev);
1825 /* save DEVID for later usage */
1826 ret = lan78xx_read_reg(dev, ID_REV, &buf);
1829 /* Respond to the IN token with a NAK */
1830 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1831 buf |= USB_CFG_BIR_;
1832 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
/* Init LTM */
1835 lan78xx_init_ltm(dev);
1837 dev->net->hard_header_len += TX_OVERHEAD;
1838 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
/* burst cap and queue lengths depend on the negotiated USB speed */
1840 if (dev->udev->speed == USB_SPEED_SUPER) {
1841 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
1842 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1845 } else if (dev->udev->speed == USB_SPEED_HIGH) {
1846 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
1847 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1848 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
1849 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
1851 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
1852 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1857 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
1858 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
1860 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1862 ret = lan78xx_write_reg(dev, HW_CFG, buf);
1864 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1865 buf |= USB_CFG_BCE_;
1866 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1868 /* set FIFO sizes */
1869 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
1870 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
1872 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
1873 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
/* clear all pending interrupts and disable flow control for now */
1875 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
1876 ret = lan78xx_write_reg(dev, FLOW, 0);
1877 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
1879 /* Don't need rfe_ctl_lock during initialisation */
1880 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
1881 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
1882 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1884 /* Enable or disable checksum offload engines */
1885 lan78xx_set_features(dev->net, dev->net->features);
1887 lan78xx_set_multicast(dev->net);
/* reset the PHY and wait until it reports ready */
1890 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1891 buf |= PMT_CTL_PHY_RST_;
1892 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
1894 timeout = jiffies + HZ;
1897 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1898 if (time_after(jiffies, timeout)) {
1899 netdev_warn(dev->net, "timeout waiting for PHY Reset");
1902 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
1904 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1905 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
1906 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1908 /* enable PHY interrupts */
1909 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1910 buf |= INT_ENP_PHY_INT;
1911 ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
/* enable the transmit path: MAC first, then the TX FIFO controller */
1913 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
1914 buf |= MAC_TX_TXEN_;
1915 ret = lan78xx_write_reg(dev, MAC_TX, buf);
1917 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
1918 buf |= FCT_TX_CTL_EN_;
1919 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
1921 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
/* enable the receive path: MAC first, then the RX FIFO controller */
1923 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1924 buf |= MAC_RX_RXEN_;
1925 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1927 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
1928 buf |= FCT_RX_CTL_EN_;
1929 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* ndo_open: wake the device (autopm), run the full hardware reset,
 * attach the PHY, start the interrupt URB used for link-change
 * notification, and kick off an initial link check via the deferred
 * kevent machinery.
 */
1934 static int lan78xx_open(struct net_device *net)
1936 struct lan78xx_net *dev = netdev_priv(net);
1939 ret = usb_autopm_get_interface(dev->intf);
1943 ret = lan78xx_reset(dev);
1947 ret = lan78xx_phy_init(dev);
1951 /* for Link Check */
1952 if (dev->urb_intr) {
1953 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
1955 netif_err(dev, ifup, dev->net,
1956 "intr submit %d\n", ret);
1961 set_bit(EVENT_DEV_OPEN, &dev->flags);
1963 netif_start_queue(net);
1965 dev->link_on = false;
/* schedule a link reset/check in process context */
1967 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1969 usb_autopm_put_interface(dev->intf);
/* Unlink all TX and RX URBs and wait for their completions to drain.
 * Uses an on-stack waitqueue published through dev->wait so completion
 * paths can wake us.
 *
 * NOTE(review): the wait loop uses &&, so it spins only while ALL of
 * rxq, txq and done are non-empty and exits as soon as any one queue
 * is empty -- this looks like it should be || to wait until every
 * queue has drained. TODO confirm against upstream behavior.
 */
1975 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
1977 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
1978 DECLARE_WAITQUEUE(wait, current);
1981 /* ensure there are no more active urbs */
1982 add_wait_queue(&unlink_wakeup, &wait);
1983 set_current_state(TASK_UNINTERRUPTIBLE);
1984 dev->wait = &unlink_wakeup;
1985 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
1987 /* maybe wait for deletions to finish. */
1988 while (!skb_queue_empty(&dev->rxq) &&
1989 !skb_queue_empty(&dev->txq) &&
1990 !skb_queue_empty(&dev->done)) {
1991 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
1992 set_current_state(TASK_UNINTERRUPTIBLE);
1993 netif_dbg(dev, ifdown, dev->net,
1994 "waited for %d urb completions\n", temp);
1996 set_current_state(TASK_RUNNING);
1998 remove_wait_queue(&unlink_wakeup, &wait);
/* ndo_stop: detach the PHY, stop the queue, terminate all in-flight
 * URBs, kill the interrupt URB, purge paused RX packets, and quiesce
 * deferred work before releasing the autopm reference taken in open.
 */
2001 int lan78xx_stop(struct net_device *net)
2003 struct lan78xx_net *dev = netdev_priv(net);
2005 phy_stop(net->phydev);
2006 phy_disconnect(net->phydev);
2009 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2010 netif_stop_queue(net);
2012 netif_info(dev, ifdown, dev->net,
2013 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2014 net->stats.rx_packets, net->stats.tx_packets,
2015 net->stats.rx_errors, net->stats.tx_errors);
2017 lan78xx_terminate_urbs(dev);
2019 usb_kill_urb(dev->urb_intr);
2021 skb_queue_purge(&dev->rxq_pause);
2023 /* deferred work (task, timer, softirq) must also stop.
2024 * can't flush_scheduled_work() until we drop rtnl (later),
2025 * else workers could deadlock; so make workers a NOP.
2028 cancel_delayed_work_sync(&dev->wq);
2029 tasklet_kill(&dev->bh);
2031 usb_autopm_put_interface(dev->intf);
/* Prepend the two 32-bit TX command words (TX_CMD_A / TX_CMD_B) to a
 * linearised skb: length+FCS request, checksum-offload flags, LSO/MSS,
 * and VLAN tag insertion. Frees the skb and returns NULL on failure.
 *
 * NOTE(review): tx_cmd_b is only assigned inside the GSO branch here;
 * its zero-initialisation for the non-GSO case is presumably in lines
 * elided from this listing -- TODO confirm.
 */
2036 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2037 struct sk_buff *skb, gfp_t flags)
2039 u32 tx_cmd_a, tx_cmd_b;
/* make room for the two command words in the headroom */
2041 if (skb_cow_head(skb, TX_OVERHEAD)) {
2042 dev_kfree_skb_any(skb);
2046 if (skb_linearize(skb)) {
2047 dev_kfree_skb_any(skb);
2051 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2053 if (skb->ip_summed == CHECKSUM_PARTIAL)
2054 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2057 if (skb_is_gso(skb)) {
2058 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2060 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2062 tx_cmd_a |= TX_CMD_A_LSO_;
2065 if (skb_vlan_tag_present(skb)) {
2066 tx_cmd_a |= TX_CMD_A_IVTG_;
2067 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* command words are pushed B first, then A, both little-endian */
2071 cpu_to_le32s(&tx_cmd_b);
2072 memcpy(skb->data, &tx_cmd_b, 4);
2075 cpu_to_le32s(&tx_cmd_a);
2076 memcpy(skb->data, &tx_cmd_a, 4);
/* Move an skb from @list to the done queue and record its new @state,
 * scheduling the bottom-half tasklet when done transitions from empty.
 * The irqsave/irqrestore pair is deliberately split across the two
 * locks so interrupts stay disabled while both are taken in turn.
 * Returns the skb's previous state so callers can detect unlink races.
 */
2081 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2082 struct sk_buff_head *list, enum skb_state state)
2084 unsigned long flags;
2085 enum skb_state old_state;
2086 struct skb_data *entry = (struct skb_data *)skb->cb;
2088 spin_lock_irqsave(&list->lock, flags);
2089 old_state = entry->state;
2090 entry->state = state;
2092 __skb_unlink(skb, list);
2093 spin_unlock(&list->lock);
2094 spin_lock(&dev->done.lock);
2096 __skb_queue_tail(&dev->done, skb);
2097 if (skb_queue_len(&dev->done) == 1)
2098 tasklet_schedule(&dev->bh);
2099 spin_unlock_irqrestore(&dev->done.lock, flags);
/* Bulk-out URB completion handler (interrupt context): update TX stats,
 * classify errors (-EPIPE defers an endpoint-halt clear to the kevent
 * worker), release the async autopm reference taken at submit time, and
 * hand the skb to the bottom half via defer_bh().
 */
2104 static void tx_complete(struct urb *urb)
2106 struct sk_buff *skb = (struct sk_buff *)urb->context;
2107 struct skb_data *entry = (struct skb_data *)skb->cb;
2108 struct lan78xx_net *dev = entry->dev;
2110 if (urb->status == 0) {
2111 dev->net->stats.tx_packets++;
2112 dev->net->stats.tx_bytes += entry->length;
2114 dev->net->stats.tx_errors++;
2116 switch (urb->status) {
2118 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2121 /* software-driven interface shutdown */
2129 netif_stop_queue(dev->net);
2132 netif_dbg(dev, tx_err, dev->net,
2133 "tx err %d\n", entry->urb->status);
2138 usb_autopm_put_interface_async(dev->intf);
2140 defer_bh(dev, skb, &dev->txq, tx_done);
/* Append @newsk to @list and stamp its state. Uses the unlocked
 * __skb_queue_tail, so the caller is expected to hold list->lock
 * (all call sites here are under dev->rxq/txq lock).
 */
2143 static void lan78xx_queue_skb(struct sk_buff_head *list,
2144 struct sk_buff *newsk, enum skb_state state)
2146 struct skb_data *entry = (struct skb_data *)newsk->cb;
2148 __skb_queue_tail(list, newsk);
2149 entry->state = state;
/* ndo_start_xmit: timestamp, prepend TX command words via
 * lan78xx_tx_prep(), and park the result on txq_pend for the bottom
 * half to batch and submit. Applies soft backpressure once more than
 * ten packets are pending. Always returns NETDEV_TX_OK; a failed prep
 * is counted as an error/drop (tx_prep already freed the skb).
 */
2152 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2154 struct lan78xx_net *dev = netdev_priv(net);
2155 struct sk_buff *skb2 = NULL;
2158 skb_tx_timestamp(skb);
2159 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2163 skb_queue_tail(&dev->txq_pend, skb2);
2165 if (skb_queue_len(&dev->txq_pend) > 10)
2166 netif_stop_queue(net);
2168 netif_dbg(dev, tx_err, dev->net,
2169 "lan78xx_tx_prep return NULL\n");
2170 dev->net->stats.tx_errors++;
2171 dev->net->stats.tx_dropped++;
2174 tasklet_schedule(&dev->bh);
2176 return NETDEV_TX_OK;
/* Driver bind: allocate the private data hung off dev->data[0], set up
 * its locks and deferred-work items, choose default offload features,
 * run the initial hardware reset, and bring up the MDIO bus. Default
 * wake-on-LAN mode is magic packet.
 */
2179 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2181 struct lan78xx_priv *pdata = NULL;
2185 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2187 pdata = (struct lan78xx_priv *)(dev->data[0]);
2189 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2195 spin_lock_init(&pdata->rfe_ctl_lock);
2196 mutex_init(&pdata->dataport_mutex);
2198 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2200 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2201 pdata->vlan_table[i] = 0;
2203 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
/* build the default feature set from the compile-time defaults */
2205 dev->net->features = 0;
2207 if (DEFAULT_TX_CSUM_ENABLE)
2208 dev->net->features |= NETIF_F_HW_CSUM;
2210 if (DEFAULT_RX_CSUM_ENABLE)
2211 dev->net->features |= NETIF_F_RXCSUM;
2213 if (DEFAULT_TSO_CSUM_ENABLE)
2214 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2216 dev->net->hw_features = dev->net->features;
2218 /* Init all registers */
2219 ret = lan78xx_reset(dev);
2221 lan78xx_mdio_init(dev);
2223 dev->net->flags |= IFF_MULTICAST;
2225 pdata->wol = WAKE_MAGIC;
/* Driver unbind: tear down the MDIO bus and free the private data
 * allocated in lan78xx_bind() (free elided in this listing).
 */
2230 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2232 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2234 lan78xx_remove_mdio(dev);
2237 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply hardware RX checksum results to an skb. If RXCSUM is disabled
 * or the RX_CMD_A_ICSM_ bit indicates the checksum should be ignored,
 * fall back to CHECKSUM_NONE; otherwise take the raw checksum from the
 * high bits of rx_cmd_b and mark the skb CHECKSUM_COMPLETE.
 */
2244 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2245 struct sk_buff *skb,
2246 u32 rx_cmd_a, u32 rx_cmd_b)
2248 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2249 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2250 skb->ip_summed = CHECKSUM_NONE;
2252 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2253 skb->ip_summed = CHECKSUM_COMPLETE;
/* Deliver a fully-parsed RX skb to the network stack. While RX is
 * paused (EVENT_RX_PAUSED), packets are parked on rxq_pause instead.
 * Updates RX stats, sets the protocol, and honours hardware RX
 * timestamp deferral before netif_rx().
 */
2257 void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2261 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2262 skb_queue_tail(&dev->rxq_pause, skb);
2266 skb->protocol = eth_type_trans(skb, dev->net);
2267 dev->net->stats.rx_packets++;
2268 dev->net->stats.rx_bytes += skb->len;
2270 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2271 skb->len + sizeof(struct ethhdr), skb->protocol);
/* clear skb_data state before handing the skb to the stack */
2272 memset(skb->cb, 0, sizeof(struct skb_data));
2274 if (skb_defer_rx_timestamp(skb))
2277 status = netif_rx(skb);
2278 if (status != NET_RX_SUCCESS)
2279 netif_dbg(dev, rx_err, dev->net,
2280 "netif_rx status %d\n", status);
/* Parse one bulk-in buffer that may contain several Ethernet frames.
 * Each frame is preceded by three little-endian command words
 * (rx_cmd_a/b/c); frames are 4-byte aligned with RXW_PADDING applied.
 * The final frame reuses the original skb; earlier frames are cloned.
 * The trailing 4 FCS bytes are trimmed from every delivered frame.
 */
2283 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2285 if (skb->len < dev->net->hard_header_len)
2288 while (skb->len > 0) {
2289 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2291 struct sk_buff *skb2;
2292 unsigned char *packet;
/* pull the three per-frame command words off the front */
2294 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2295 le32_to_cpus(&rx_cmd_a);
2296 skb_pull(skb, sizeof(rx_cmd_a));
2298 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2299 le32_to_cpus(&rx_cmd_b);
2300 skb_pull(skb, sizeof(rx_cmd_b));
2302 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2303 le16_to_cpus(&rx_cmd_c);
2304 skb_pull(skb, sizeof(rx_cmd_c));
2308 /* get the packet length */
2309 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2310 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2312 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2313 netif_dbg(dev, rx_err, dev->net,
2314 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2316 /* last frame in this batch */
2317 if (skb->len == size) {
2318 lan78xx_rx_csum_offload(dev, skb,
2319 rx_cmd_a, rx_cmd_b);
2321 skb_trim(skb, skb->len - 4); /* remove fcs */
2322 skb->truesize = size + sizeof(struct sk_buff);
/* not the last frame: clone and carve this frame out of the buffer */
2327 skb2 = skb_clone(skb, GFP_ATOMIC);
2328 if (unlikely(!skb2)) {
2329 netdev_warn(dev->net, "Error allocating skb");
2334 skb2->data = packet;
2335 skb_set_tail_pointer(skb2, size);
2337 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2339 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2340 skb2->truesize = size + sizeof(struct sk_buff);
2342 lan78xx_skb_return(dev, skb2);
2345 skb_pull(skb, size);
2347 /* padding bytes before the next frame starts */
2349 skb_pull(skb, align_count);
/* Bottom-half helper: run lan78xx_rx() on a completed buffer; deliver
 * it via lan78xx_skb_return() on success, or count an rx_error and
 * requeue the skb on the done list for cleanup on failure.
 */
2355 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2357 if (!lan78xx_rx(dev, skb)) {
2358 dev->net->stats.rx_errors++;
2363 lan78xx_skb_return(dev, skb);
2367 netif_dbg(dev, rx_err, dev->net, "drop\n");
2368 dev->net->stats.rx_errors++;
2370 skb_queue_tail(&dev->done, skb);
2373 static void rx_complete(struct urb *urb);
/* Allocate an rx_urb_size skb, bind it to @urb, and submit the bulk-in
 * URB -- but only while the device is present, running, and not halted
 * or asleep (checked under the rxq lock so the queue state can't race
 * with completion). On submit failure the skb is freed; -ENODEV
 * additionally detaches the netdev.
 */
2375 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2377 struct sk_buff *skb;
2378 struct skb_data *entry;
2379 unsigned long lockflags;
2380 size_t size = dev->rx_urb_size;
2383 skb = netdev_alloc_skb_ip_align(dev->net, size);
2389 entry = (struct skb_data *)skb->cb;
2394 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2395 skb->data, size, rx_complete, skb);
2397 spin_lock_irqsave(&dev->rxq.lock, lockflags);
2399 if (netif_device_present(dev->net) &&
2400 netif_running(dev->net) &&
2401 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2402 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2403 ret = usb_submit_urb(urb, GFP_ATOMIC);
2406 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2409 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2412 netif_dbg(dev, ifdown, dev->net, "device gone\n");
2413 netif_device_detach(dev->net);
2419 netif_dbg(dev, rx_err, dev->net,
2420 "rx submit, %d\n", ret);
2421 tasklet_schedule(&dev->bh);
2424 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2427 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2429 dev_kfree_skb_any(skb);
/* Bulk-in URB completion handler (interrupt context): classify the URB
 * status, account errors (-EPIPE defers an endpoint-halt clear to the
 * kevent worker; shutdown/unlink codes end resubmission), push the skb
 * to the bottom half via defer_bh(), and resubmit the URB immediately
 * when the interface is still running and not halted/unlinking.
 */
2435 static void rx_complete(struct urb *urb)
2437 struct sk_buff *skb = (struct sk_buff *)urb->context;
2438 struct skb_data *entry = (struct skb_data *)skb->cb;
2439 struct lan78xx_net *dev = entry->dev;
2440 int urb_status = urb->status;
2441 enum skb_state state;
2443 skb_put(skb, urb->actual_length);
2447 switch (urb_status) {
2449 if (skb->len < dev->net->hard_header_len) {
2451 dev->net->stats.rx_errors++;
2452 dev->net->stats.rx_length_errors++;
2453 netif_dbg(dev, rx_err, dev->net,
2454 "rx length %d\n", skb->len);
2456 usb_mark_last_busy(dev->udev);
2459 dev->net->stats.rx_errors++;
2460 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2462 case -ECONNRESET: /* async unlink */
2463 case -ESHUTDOWN: /* hardware gone */
2464 netif_dbg(dev, ifdown, dev->net,
2465 "rx shutdown, code %d\n", urb_status);
2473 dev->net->stats.rx_errors++;
2479 /* data overrun ... flush fifo? */
2481 dev->net->stats.rx_over_errors++;
2486 dev->net->stats.rx_errors++;
2487 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2491 state = defer_bh(dev, skb, &dev->rxq, state);
/* resubmit unless the skb was concurrently unlinked */
2494 if (netif_running(dev->net) &&
2495 !test_bit(EVENT_RX_HALT, &dev->flags) &&
2496 state != unlink_start) {
2497 rx_submit(dev, urb, GFP_ATOMIC);
2502 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: batch pending skbs from txq_pend into one contiguous
 * buffer (each packet padded to a 4-byte boundary, capped at
 * MAX_SINGLE_PACKET_SIZE; GSO packets are sent on their own), then
 * allocate and submit a single bulk-out URB. Handles autopm, deferred
 * transmission while asleep (anchored on dev->deferred), ZLP for
 * max-packet-multiple lengths, and -EPIPE halt deferral.
 */
2505 static void lan78xx_tx_bh(struct lan78xx_net *dev)
2508 struct urb *urb = NULL;
2509 struct skb_data *entry;
2510 unsigned long flags;
2511 struct sk_buff_head *tqp = &dev->txq_pend;
2512 struct sk_buff *skb, *skb2;
2515 int skb_totallen, pkt_cnt;
/* first pass: decide how many pending packets fit in one buffer */
2519 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2520 if (skb_is_gso(skb)) {
2522 /* handle previous packets first */
2526 skb2 = skb_dequeue(tqp);
2530 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2532 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2536 /* copy to a single skb */
2537 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2541 skb_put(skb, skb_totallen);
2543 for (count = pos = 0; count < pkt_cnt; count++) {
2544 skb2 = skb_dequeue(tqp);
2546 memcpy(skb->data + pos, skb2->data, skb2->len);
2547 pos += roundup(skb2->len, sizeof(u32));
2548 dev_kfree_skb(skb2);
2552 length = skb_totallen;
2555 urb = usb_alloc_urb(0, GFP_ATOMIC);
2557 netif_dbg(dev, tx_err, dev->net, "no urb\n");
2561 entry = (struct skb_data *)skb->cb;
2564 entry->length = length;
2566 spin_lock_irqsave(&dev->txq.lock, flags);
2567 ret = usb_autopm_get_interface_async(dev->intf);
2569 spin_unlock_irqrestore(&dev->txq.lock, flags);
2573 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
2574 skb->data, skb->len, tx_complete, skb);
2576 if (length % dev->maxpacket == 0) {
2577 /* send USB_ZERO_PACKET */
2578 urb->transfer_flags |= URB_ZERO_PACKET;
2582 /* if this triggers the device is still a sleep */
2583 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2584 /* transmission will be done in resume */
2585 usb_anchor_urb(urb, &dev->deferred);
2586 /* no use to process more packets */
2587 netif_stop_queue(dev->net);
2589 spin_unlock_irqrestore(&dev->txq.lock, flags);
2590 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
2595 ret = usb_submit_urb(urb, GFP_ATOMIC);
2598 dev->net->trans_start = jiffies;
2599 lan78xx_queue_skb(&dev->txq, skb, tx_start);
2600 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
2601 netif_stop_queue(dev->net);
2604 netif_stop_queue(dev->net);
2605 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2606 usb_autopm_put_interface_async(dev->intf);
2609 usb_autopm_put_interface_async(dev->intf);
2610 netif_dbg(dev, tx_err, dev->net,
2611 "tx: submit urb err %d\n", ret);
2615 spin_unlock_irqrestore(&dev->txq.lock, flags);
2618 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
2620 dev->net->stats.tx_dropped++;
2622 dev_kfree_skb_any(skb);
2625 netif_dbg(dev, tx_queued, dev->net,
2626 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the RX URB queue toward rx_qlen (at most ten
 * submissions per invocation), reschedule the tasklet if still short,
 * and wake the TX queue when the TX queue has drained below tx_qlen.
 */
2629 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2634 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2635 for (i = 0; i < 10; i++) {
2636 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2638 urb = usb_alloc_urb(0, GFP_ATOMIC);
2640 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2644 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2645 tasklet_schedule(&dev->bh);
2647 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2648 netif_wake_queue(dev->net);
/* Main bottom-half tasklet: drain the done queue, dispatching each skb
 * by its skb_data state (rx_done -> process packet, tx_done/rx_cleanup
 * -> free the URB), then run the TX and RX bottom halves while the
 * device is present and running.
 */
2651 static void lan78xx_bh(unsigned long param)
2653 struct lan78xx_net *dev = (struct lan78xx_net *)param;
2654 struct sk_buff *skb;
2655 struct skb_data *entry;
2657 while ((skb = skb_dequeue(&dev->done))) {
2658 entry = (struct skb_data *)(skb->cb);
2659 switch (entry->state) {
2661 entry->state = rx_cleanup;
2662 rx_process(dev, skb);
2665 usb_free_urb(entry->urb);
2669 usb_free_urb(entry->urb);
2673 netdev_dbg(dev->net, "skb state %d\n", entry->state);
2678 if (netif_device_present(dev->net) && netif_running(dev->net)) {
2679 if (!skb_queue_empty(&dev->txq_pend))
2682 if (!timer_pending(&dev->delay) &&
2683 !test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred-event worker (process context): services the events raised
 * via lan78xx_defer_kevent() from atomic contexts -- clearing TX/RX
 * endpoint halts (after unlinking in-flight URBs) and running link
 * reset. usb_clear_halt() sleeps, which is why this runs here rather
 * than in the completion handlers that detected the conditions.
 */
2688 static void lan78xx_delayedwork(struct work_struct *work)
2691 struct lan78xx_net *dev;
2693 dev = container_of(work, struct lan78xx_net, wq.work);
2695 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2696 unlink_urbs(dev, &dev->txq);
2697 status = usb_autopm_get_interface(dev->intf);
2700 status = usb_clear_halt(dev->udev, dev->pipe_out);
2701 usb_autopm_put_interface(dev->intf);
2704 status != -ESHUTDOWN) {
2705 if (netif_msg_tx_err(dev))
2707 netdev_err(dev->net,
2708 "can't clear tx halt, status %d\n",
2711 clear_bit(EVENT_TX_HALT, &dev->flags);
2712 if (status != -ESHUTDOWN)
2713 netif_wake_queue(dev->net);
2716 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2717 unlink_urbs(dev, &dev->rxq);
2718 status = usb_autopm_get_interface(dev->intf);
2721 status = usb_clear_halt(dev->udev, dev->pipe_in);
2722 usb_autopm_put_interface(dev->intf);
2725 status != -ESHUTDOWN) {
2726 if (netif_msg_rx_err(dev))
2728 netdev_err(dev->net,
2729 "can't clear rx halt, status %d\n",
2732 clear_bit(EVENT_RX_HALT, &dev->flags);
2733 tasklet_schedule(&dev->bh);
2737 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2740 clear_bit(EVENT_LINK_RESET, &dev->flags);
2741 status = usb_autopm_get_interface(dev->intf);
2744 if (lan78xx_link_reset(dev) < 0) {
2745 usb_autopm_put_interface(dev->intf);
2747 netdev_info(dev->net, "link reset failed (%d)\n",
2750 usb_autopm_put_interface(dev->intf);
/* Interrupt-endpoint URB completion: feed good status buffers to
 * lan78xx_status() (link-change handling), treat kill/shutdown codes
 * as terminal, and resubmit the URB (with a zeroed buffer) while the
 * interface is still running. No throttling -- this endpoint polls
 * infrequently by design.
 */
2755 static void intr_complete(struct urb *urb)
2757 struct lan78xx_net *dev = urb->context;
2758 int status = urb->status;
2763 lan78xx_status(dev, urb);
2766 /* software-driven interface shutdown */
2767 case -ENOENT: /* urb killed */
2768 case -ESHUTDOWN: /* hardware gone */
2769 netif_dbg(dev, ifdown, dev->net,
2770 "intr shutdown, code %d\n", status);
2773 /* NOTE: not throttling like RX/TX, since this endpoint
2774 * already polls infrequently
2777 netdev_dbg(dev->net, "intr status %d\n", status);
2781 if (!netif_running(dev->net))
2784 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2785 status = usb_submit_urb(urb, GFP_ATOMIC);
2787 netif_err(dev, timer, dev->net,
2788 "intr resubmit --> %d\n", status);
/* USB disconnect callback: detach interface data, unregister the
 * netdev, cancel deferred work, scuttle any anchored deferred TX URBs,
 * unbind, and kill/free the interrupt URB (further teardown elided in
 * this listing).
 */
2791 static void lan78xx_disconnect(struct usb_interface *intf)
2793 struct lan78xx_net *dev;
2794 struct usb_device *udev;
2795 struct net_device *net;
2797 dev = usb_get_intfdata(intf);
2798 usb_set_intfdata(intf, NULL);
2802 udev = interface_to_usbdev(intf);
2805 unregister_netdev(net);
2807 cancel_delayed_work_sync(&dev->wq);
2809 usb_scuttle_anchored_urbs(&dev->deferred);
2811 lan78xx_unbind(dev, intf);
2813 usb_kill_urb(dev->urb_intr);
2814 usb_free_urb(dev->urb_intr);
/* ndo_tx_timeout: the watchdog fired -- unlink all in-flight TX URBs
 * and reschedule the bottom half to restart transmission.
 */
2820 void lan78xx_tx_timeout(struct net_device *net)
2822 struct lan78xx_net *dev = netdev_priv(net);
2824 unlink_urbs(dev, &dev->txq);
2825 tasklet_schedule(&dev->bh);
/* ndo_features_check: strip GSO features from skbs that would exceed
 * the device's single-packet limit once TX_OVERHEAD is added, then
 * apply the generic VLAN and VXLAN feature restrictions.
 */
2828 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
2829 struct net_device *netdev,
2830 netdev_features_t features)
2832 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
2833 features &= ~NETIF_F_GSO_MASK;
2835 features = vlan_features_check(skb, features);
2836 features = vxlan_features_check(skb, features);
/* net_device_ops for the lan78xx netdev; wired up in lan78xx_probe(). */
2841 static const struct net_device_ops lan78xx_netdev_ops = {
2842 .ndo_open = lan78xx_open,
2843 .ndo_stop = lan78xx_stop,
2844 .ndo_start_xmit = lan78xx_start_xmit,
2845 .ndo_tx_timeout = lan78xx_tx_timeout,
2846 .ndo_change_mtu = lan78xx_change_mtu,
2847 .ndo_set_mac_address = lan78xx_set_mac_addr,
2848 .ndo_validate_addr = eth_validate_addr,
2849 .ndo_do_ioctl = lan78xx_ioctl,
2850 .ndo_set_rx_mode = lan78xx_set_multicast,
2851 .ndo_set_features = lan78xx_set_features,
2852 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
2853 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
2854 .ndo_features_check = lan78xx_features_check,
/* USB probe: allocate and register a net_device for a newly attached
 * LAN78xx interface.
 *
 * NOTE(review): this listing is elided — declarations of ret/period/
 * maxp/buf, the goto-based error-unwind labels, and several "if (ret)"
 * checks are missing from the visible span; statements below should be
 * read as highlights of the real function, not its full body.
 */
2857 static int lan78xx_probe(struct usb_interface *intf,
2858 const struct usb_device_id *id)
2860 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
2861 struct lan78xx_net *dev;
2862 struct net_device *netdev;
2863 struct usb_device *udev;
2869 udev = interface_to_usbdev(intf);
/* take a reference on the usb_device; presumably dropped on the
 * (elided) error path and in disconnect — TODO confirm */
2870 udev = usb_get_dev(udev);
2873 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2875 dev_err(&intf->dev, "Error: OOM\n");
2879 /* netdev_printk() needs this */
2880 SET_NETDEV_DEV(netdev, &intf->dev);
2882 dev = netdev_priv(netdev);
2886 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2887 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
/* initialize all skb queues and synchronization primitives before any
 * URB traffic can touch them */
2889 skb_queue_head_init(&dev->rxq);
2890 skb_queue_head_init(&dev->txq);
2891 skb_queue_head_init(&dev->done);
2892 skb_queue_head_init(&dev->rxq_pause);
2893 skb_queue_head_init(&dev->txq_pend);
2894 mutex_init(&dev->phy_mutex);
2896 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2897 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
2898 init_usb_anchor(&dev->deferred);
2900 netdev->netdev_ops = &lan78xx_netdev_ops;
2901 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2902 netdev->ethtool_ops = &lan78xx_ethtool_ops;
/* the device must expose bulk-in, bulk-out and interrupt-in endpoints;
 * validate each before use so a malicious/odd device can't crash us */
2904 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
2909 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
2910 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
2911 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
2916 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
2917 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
2918 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
2923 ep_intr = &intf->cur_altsetting->endpoint[2];
2924 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
2929 dev->pipe_intr = usb_rcvintpipe(dev->udev,
2930 usb_endpoint_num(&ep_intr->desc));
/* chip-specific setup: register reads, MAC address, feature flags */
2932 ret = lan78xx_bind(dev, intf);
2935 strcpy(netdev->name, "eth%d");
/* clamp MTU so MTU + hard_header_len never exceeds the device limit */
2937 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
2938 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
2939 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
/* set up the interrupt-in URB used for link-event notifications */
2941 period = ep_intr->desc.bInterval;
2942 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
2943 buf = kmalloc(maxp, GFP_KERNEL);
2945 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
2946 if (!dev->urb_intr) {
2950 usb_fill_int_urb(dev->urb_intr, dev->udev,
2951 dev->pipe_intr, buf, maxp,
2952 intr_complete, dev, period);
/* URB_FREE_BUFFER: usb core frees 'buf' when the URB is freed */
2953 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
2957 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
2959 /* driver requires remote-wakeup capability during autosuspend. */
2960 intf->needs_remote_wakeup = 1;
2962 ret = register_netdev(netdev);
2964 netif_err(dev, probe, netdev, "couldn't register the device\n");
2968 usb_set_intfdata(intf, dev);
2970 ret = device_set_wakeup_enable(&udev->dev, true);
2972 /* Default delay of 2sec has more overhead than advantage.
2973 * Set to 10sec as default.
/* (closing of the comment above is elided in this listing) */
2975 pm_runtime_set_autosuspend_delay(&udev->dev,
2976 DEFAULT_AUTOSUSPEND_DELAY);
/* error unwind (labels elided): undo bind, then free the netdev */
2981 lan78xx_unbind(dev, intf);
2983 free_netdev(netdev);
/* Compute the 16-bit CRC the chip expects in WUF_CFGX for wake-frame
 * matching.  Bit-serial implementation over polynomial 0x8005.
 *
 * NOTE(review): the CRC accumulator init, the msb/data extraction, the
 * shift steps and the final return are elided from this listing — only
 * the loop skeleton is visible.
 */
2990 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
2992 const u16 crc16poly = 0x8005;
/* process each input byte ... */
2998 for (i = 0; i < len; i++) {
/* ... one bit at a time, LSB first (data & 1 below) */
3000 for (bit = 0; bit < 8; bit++) {
3004 if (msb ^ (u16)(data & 1)) {
3006 crc |= (u16)0x0001U;
/* Program the chip's wakeup logic for system suspend according to the
 * user's Wake-on-LAN selection (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP),
 * then re-enable the receiver so wake frames can be seen.
 *
 * NOTE(review): variable declarations (ret, buf, crc, mask_index,
 * temp_wucsr, temp_pmt_ctl), closing braces and mask_index increments
 * are elided from this listing.  The register write sequence below is
 * order-sensitive; do not reorder.
 */
3015 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* first bytes of IPv4 (01:00:5E) and IPv6 (33:33) multicast MACs, and
 * the ARP EtherType (0x0806), used to build wake-frame filters */
3023 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3024 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3025 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce MAC TX and RX before touching wakeup registers */
3027 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3028 buf &= ~MAC_TX_TXEN_;
3029 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3030 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3031 buf &= ~MAC_RX_RXEN_;
3032 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* clear any stale wakeup configuration and wake-source status */
3034 ret = lan78xx_write_reg(dev, WUCSR, 0);
3035 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3036 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3041 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3042 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3043 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* disable all wake-frame filters before (re)programming them */
3045 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3046 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
/* WAKE_PHY: wake on PHY link events (energy detect) */
3049 if (wol & WAKE_PHY) {
3050 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3052 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3053 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3054 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
/* WAKE_MAGIC: wake on magic packet; uses deeper suspend mode 3 */
3056 if (wol & WAKE_MAGIC) {
3057 temp_wucsr |= WUCSR_MPEN_;
3059 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3060 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3061 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
/* WAKE_BCAST: wake on any broadcast frame */
3063 if (wol & WAKE_BCAST) {
3064 temp_wucsr |= WUCSR_BCST_EN_;
3066 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3067 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3068 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
/* WAKE_MCAST: two wake-frame filters matching the IPv4 and IPv6
 * multicast MAC prefixes at frame offset 0 */
3070 if (wol & WAKE_MCAST) {
3071 temp_wucsr |= WUCSR_WAKE_EN_;
3073 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3074 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3075 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3077 WUF_CFGX_TYPE_MCAST_ |
3078 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3079 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x7 = compare first 3 bytes of the frame */
3081 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3082 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3083 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3084 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3087 /* for IPv6 Multicast */
3088 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3089 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3091 WUF_CFGX_TYPE_MCAST_ |
3092 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3093 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3 = compare first 2 bytes (33:33 prefix) */
3095 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3096 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3097 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3098 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3101 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3102 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3103 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
/* WAKE_UCAST: wake on perfect-DA-filtered unicast frames */
3105 if (wol & WAKE_UCAST) {
3106 temp_wucsr |= WUCSR_PFDA_EN_;
3108 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3109 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3110 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
/* WAKE_ARP: wake-frame filter matching EtherType 0x0806 at the
 * packet-type offset (bytes 12-13) */
3112 if (wol & WAKE_ARP) {
3113 temp_wucsr |= WUCSR_WAKE_EN_;
3115 /* set WUF_CFG & WUF_MASK
3116 * for packettype (offset 12,13) = ARP (0x0806)
/* (comment close elided in this listing) */
3118 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3119 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3121 WUF_CFGX_TYPE_ALL_ |
3122 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3123 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3000 = compare bytes 12 and 13 (the EtherType) */
3125 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3126 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3127 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3128 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3131 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3132 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3133 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
/* commit the accumulated wakeup-control bits */
3136 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3138 /* when multiple WOL bits are set */
3139 if (hweight_long((unsigned long)wol) > 1) {
3140 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3141 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3142 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3144 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear any pending wake-up status bits */
3147 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3148 buf |= PMT_CTL_WUPS_MASK_;
3149 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* re-enable the receiver so the chip can see wake frames */
3151 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3152 buf |= MAC_RX_RXEN_;
3153 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* USB suspend callback, handling both runtime (auto) suspend and system
 * suspend.  On first suspend it quiesces the device; for autosuspend it
 * arms good-frame wakeup, for system suspend it defers to
 * lan78xx_set_suspend() with the configured WoL options.
 *
 * NOTE(review): declarations of ret/buf/event and several braces/else
 * branches are elided from this listing; control-flow nesting below is
 * approximate.
 */
3158 int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3160 struct lan78xx_net *dev = usb_get_intfdata(intf);
3161 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3166 event = message.event;
/* only the first (outermost) suspend actually quiesces the device */
3168 if (!dev->suspend_count++) {
3169 spin_lock_irq(&dev->txq.lock);
3170 /* don't autosuspend while transmitting */
3171 if ((skb_queue_len(&dev->txq) ||
3172 skb_queue_len(&dev->txq_pend)) &&
3173 PMSG_IS_AUTO(message)) {
/* busy path: refuse autosuspend (return elided in listing) */
3174 spin_unlock_irq(&dev->txq.lock);
3178 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3179 spin_unlock_irq(&dev->txq.lock);
/* stop MAC TX and RX */
3183 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3184 buf &= ~MAC_TX_TXEN_;
3185 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3186 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3187 buf &= ~MAC_RX_RXEN_;
3188 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3190 /* empty out the rx and queues */
3191 netif_device_detach(dev->net);
3192 lan78xx_terminate_urbs(dev);
3193 usb_kill_urb(dev->urb_intr);
/* presumably a failure-recovery reattach — surrounding condition is
 * elided in this listing; verify against the full source */
3196 netif_device_attach(dev->net);
3199 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3200 if (PMSG_IS_AUTO(message)) {
3201 /* auto suspend (selective suspend) */
3202 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3203 buf &= ~MAC_TX_TXEN_;
3204 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3205 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3206 buf &= ~MAC_RX_RXEN_;
3207 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* clear stale wakeup config/status */
3209 ret = lan78xx_write_reg(dev, WUCSR, 0);
3210 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3211 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3213 /* set goodframe wakeup */
3214 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3216 buf |= WUCSR_RFE_WAKE_EN_;
3217 buf |= WUCSR_STORE_WAKE_;
3219 ret = lan78xx_write_reg(dev, WUCSR, buf);
3221 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3223 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3224 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3226 buf |= PMT_CTL_PHY_WAKE_EN_;
3227 buf |= PMT_CTL_WOL_EN_;
3228 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3229 buf |= PMT_CTL_SUS_MODE_3_;
3231 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* clear pending wake-up status */
3233 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3235 buf |= PMT_CTL_WUPS_MASK_;
3237 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* re-enable RX so wake frames are visible while suspended */
3239 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3240 buf |= MAC_RX_RXEN_;
3241 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* system suspend: program WoL per user configuration (else branch
 * of the PMSG_IS_AUTO test; braces elided in listing) */
3243 lan78xx_set_suspend(dev, pdata->wol);
/* USB resume callback: restart the interrupt URB, flush TX URBs that
 * were deferred while asleep, disarm the wakeup logic and re-enable the
 * transmitter.
 *
 * NOTE(review): declarations of ret/buf/res and several braces/error
 * branches are elided from this listing.
 */
3254 int lan78xx_resume(struct usb_interface *intf)
3255 struct lan78xx_net *dev = usb_get_intfdata(intf);
3256 struct sk_buff *skb;
/* only the final (outermost) resume restarts traffic */
3260 if (!--dev->suspend_count) {
3261 /* resume interrupt URBs */
3262 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3263 usb_submit_urb(dev->urb_intr, GFP_NOIO);
/* resubmit TX URBs that were anchored on dev->deferred while the
 * device was asleep */
3265 spin_lock_irq(&dev->txq.lock);
3266 while ((res = usb_get_from_anchor(&dev->deferred))) {
3267 skb = (struct sk_buff *)res->context;
3268 ret = usb_submit_urb(res, GFP_ATOMIC);
/* submit failure path: drop the skb and release the PM reference
 * taken at queue time (condition elided in listing) */
3270 dev_kfree_skb_any(skb);
3272 usb_autopm_put_interface_async(dev->intf);
/* success path: account the skb as in-flight on txq */
3274 dev->net->trans_start = jiffies;
3275 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3279 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3280 spin_unlock_irq(&dev->txq.lock);
3282 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3283 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3284 netif_start_queue(dev->net);
3285 tasklet_schedule(&dev->bh);
/* clear wakeup enables/status, then ack the wake sources the chip
 * latched while suspended */
3289 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3290 ret = lan78xx_write_reg(dev, WUCSR, 0);
3291 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3293 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3295 WUCSR2_IPV6_TCPSYN_RCD_ |
3296 WUCSR2_IPV4_TCPSYN_RCD_);
3298 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3299 WUCSR_EEE_RX_WAKE_ |
3301 WUCSR_RFE_WAKE_FR_ |
/* (remaining WUCSR bits on elided lines) */
/* re-enable the MAC transmitter */
3306 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3307 buf |= MAC_TX_TXEN_;
3308 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* USB reset_resume callback: the device lost its state across suspend,
 * so re-run PHY init and then the normal resume path.  (The chip reset
 * call on the elided line 3317 is not visible in this listing.)
 */
3313 int lan78xx_reset_resume(struct usb_interface *intf)
3315 struct lan78xx_net *dev = usb_get_intfdata(intf);
3319 lan78xx_phy_init(dev);
3321 return lan78xx_resume(intf);
/* USB device-ID match table: LAN7800 and LAN7850 parts.  Exposed to
 * userspace/modprobe via MODULE_DEVICE_TABLE for automatic loading.
 * (Per-entry braces and the terminating empty entry are elided in this
 * listing.)
 */
3324 static const struct usb_device_id products[] = {
3326 /* LAN7800 USB Gigabit Ethernet Device */
3327 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3330 /* LAN7850 USB Gigabit Ethernet Device */
3331 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3335 MODULE_DEVICE_TABLE(usb, products);
/* usb_driver registration: ties the probe/disconnect and power-
 * management callbacks above to the device-ID table.  Autosuspend is
 * supported (the driver sets needs_remote_wakeup in probe), and
 * hub-initiated LPM is disabled for this hardware.
 */
3337 static struct usb_driver lan78xx_driver = {
3338 .name = DRIVER_NAME,
3339 .id_table = products,
3340 .probe = lan78xx_probe,
3341 .disconnect = lan78xx_disconnect,
3342 .suspend = lan78xx_suspend,
3343 .resume = lan78xx_resume,
3344 .reset_resume = lan78xx_reset_resume,
3345 .supports_autosuspend = 1,
3346 .disable_hub_initiated_lpm = 1,
/* module_usb_driver() expands to module init/exit that register and
 * deregister lan78xx_driver */
3349 module_usb_driver(lan78xx_driver);
/* Module metadata, from the DRIVER_* macros defined at the top of the
 * file; "GPL" matches the GPLv2-or-later header notice.
 */
3351 MODULE_AUTHOR(DRIVER_AUTHOR);
3352 MODULE_DESCRIPTION(DRIVER_DESC);
3353 MODULE_LICENSE("GPL");