/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <net/vxlan.h>
35 #include <linux/interrupt.h>
36 #include <linux/irqdomain.h>
37 #include <linux/irq.h>
38 #include <linux/irqchip/chained_irq.h>
39 #include <linux/microchipphy.h>
40 #include <linux/phy_fixed.h>
41 #include <linux/of_mdio.h>
42 #include <linux/of_net.h>
45 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
46 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
47 #define DRIVER_NAME "lan78xx"
49 #define TX_TIMEOUT_JIFFIES (5 * HZ)
50 #define THROTTLE_JIFFIES (HZ / 8)
51 #define UNLINK_TIMEOUT_MS 3
53 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
55 #define SS_USB_PKT_SIZE (1024)
56 #define HS_USB_PKT_SIZE (512)
57 #define FS_USB_PKT_SIZE (64)
59 #define MAX_RX_FIFO_SIZE (12 * 1024)
60 #define MAX_TX_FIFO_SIZE (12 * 1024)
61 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
62 #define DEFAULT_BULK_IN_DELAY (0x0800)
63 #define MAX_SINGLE_PACKET_SIZE (9000)
64 #define DEFAULT_TX_CSUM_ENABLE (true)
65 #define DEFAULT_RX_CSUM_ENABLE (true)
66 #define DEFAULT_TSO_CSUM_ENABLE (true)
67 #define DEFAULT_VLAN_FILTER_ENABLE (true)
68 #define DEFAULT_VLAN_RX_OFFLOAD (true)
69 #define TX_OVERHEAD (8)
72 #define LAN78XX_USB_VENDOR_ID (0x0424)
73 #define LAN7800_USB_PRODUCT_ID (0x7800)
74 #define LAN7850_USB_PRODUCT_ID (0x7850)
75 #define LAN7801_USB_PRODUCT_ID (0x7801)
76 #define LAN78XX_EEPROM_MAGIC (0x78A5)
77 #define LAN78XX_OTP_MAGIC (0x78F3)
82 #define EEPROM_INDICATOR (0xA5)
83 #define EEPROM_MAC_OFFSET (0x01)
84 #define MAX_EEPROM_SIZE 512
85 #define OTP_INDICATOR_1 (0xF3)
86 #define OTP_INDICATOR_2 (0xF7)
88 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
89 WAKE_MCAST | WAKE_BCAST | \
90 WAKE_ARP | WAKE_MAGIC)
92 /* USB related defines */
93 #define BULK_IN_PIPE 1
94 #define BULK_OUT_PIPE 2
96 /* default autosuspend delay (mSec)*/
97 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
99 /* statistic update interval (mSec) */
100 #define STAT_UPDATE_TIMER (1 * 1000)
102 /* defines interrupts from interrupt EP */
103 #define MAX_INT_EP (32)
104 #define INT_EP_INTEP (31)
105 #define INT_EP_OTP_WR_DONE (28)
106 #define INT_EP_EEE_TX_LPI_START (26)
107 #define INT_EP_EEE_TX_LPI_STOP (25)
108 #define INT_EP_EEE_RX_LPI (24)
109 #define INT_EP_MAC_RESET_TIMEOUT (23)
110 #define INT_EP_RDFO (22)
111 #define INT_EP_TXE (21)
112 #define INT_EP_USB_STATUS (20)
113 #define INT_EP_TX_DIS (19)
114 #define INT_EP_RX_DIS (18)
115 #define INT_EP_PHY (17)
116 #define INT_EP_DP (16)
117 #define INT_EP_MAC_ERR (15)
118 #define INT_EP_TDFU (14)
119 #define INT_EP_TDFO (13)
120 #define INT_EP_UTX (12)
121 #define INT_EP_GPIO_11 (11)
122 #define INT_EP_GPIO_10 (10)
123 #define INT_EP_GPIO_9 (9)
124 #define INT_EP_GPIO_8 (8)
125 #define INT_EP_GPIO_7 (7)
126 #define INT_EP_GPIO_6 (6)
127 #define INT_EP_GPIO_5 (5)
128 #define INT_EP_GPIO_4 (4)
129 #define INT_EP_GPIO_3 (3)
130 #define INT_EP_GPIO_2 (2)
131 #define INT_EP_GPIO_1 (1)
132 #define INT_EP_GPIO_0 (0)
134 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
136 "RX Alignment Errors",
137 "Rx Fragment Errors",
139 "RX Undersize Frame Errors",
140 "RX Oversize Frame Errors",
142 "RX Unicast Byte Count",
143 "RX Broadcast Byte Count",
144 "RX Multicast Byte Count",
146 "RX Broadcast Frames",
147 "RX Multicast Frames",
150 "RX 65 - 127 Byte Frames",
151 "RX 128 - 255 Byte Frames",
152 "RX 256 - 511 Bytes Frames",
153 "RX 512 - 1023 Byte Frames",
154 "RX 1024 - 1518 Byte Frames",
155 "RX Greater 1518 Byte Frames",
156 "EEE RX LPI Transitions",
159 "TX Excess Deferral Errors",
162 "TX Single Collisions",
163 "TX Multiple Collisions",
164 "TX Excessive Collision",
165 "TX Late Collisions",
166 "TX Unicast Byte Count",
167 "TX Broadcast Byte Count",
168 "TX Multicast Byte Count",
170 "TX Broadcast Frames",
171 "TX Multicast Frames",
174 "TX 65 - 127 Byte Frames",
175 "TX 128 - 255 Byte Frames",
176 "TX 256 - 511 Bytes Frames",
177 "TX 512 - 1023 Byte Frames",
178 "TX 1024 - 1518 Byte Frames",
179 "TX Greater 1518 Byte Frames",
180 "EEE TX LPI Transitions",
184 struct lan78xx_statstage {
186 u32 rx_alignment_errors;
187 u32 rx_fragment_errors;
188 u32 rx_jabber_errors;
189 u32 rx_undersize_frame_errors;
190 u32 rx_oversize_frame_errors;
191 u32 rx_dropped_frames;
192 u32 rx_unicast_byte_count;
193 u32 rx_broadcast_byte_count;
194 u32 rx_multicast_byte_count;
195 u32 rx_unicast_frames;
196 u32 rx_broadcast_frames;
197 u32 rx_multicast_frames;
199 u32 rx_64_byte_frames;
200 u32 rx_65_127_byte_frames;
201 u32 rx_128_255_byte_frames;
202 u32 rx_256_511_bytes_frames;
203 u32 rx_512_1023_byte_frames;
204 u32 rx_1024_1518_byte_frames;
205 u32 rx_greater_1518_byte_frames;
206 u32 eee_rx_lpi_transitions;
209 u32 tx_excess_deferral_errors;
210 u32 tx_carrier_errors;
211 u32 tx_bad_byte_count;
212 u32 tx_single_collisions;
213 u32 tx_multiple_collisions;
214 u32 tx_excessive_collision;
215 u32 tx_late_collisions;
216 u32 tx_unicast_byte_count;
217 u32 tx_broadcast_byte_count;
218 u32 tx_multicast_byte_count;
219 u32 tx_unicast_frames;
220 u32 tx_broadcast_frames;
221 u32 tx_multicast_frames;
223 u32 tx_64_byte_frames;
224 u32 tx_65_127_byte_frames;
225 u32 tx_128_255_byte_frames;
226 u32 tx_256_511_bytes_frames;
227 u32 tx_512_1023_byte_frames;
228 u32 tx_1024_1518_byte_frames;
229 u32 tx_greater_1518_byte_frames;
230 u32 eee_tx_lpi_transitions;
234 struct lan78xx_statstage64 {
236 u64 rx_alignment_errors;
237 u64 rx_fragment_errors;
238 u64 rx_jabber_errors;
239 u64 rx_undersize_frame_errors;
240 u64 rx_oversize_frame_errors;
241 u64 rx_dropped_frames;
242 u64 rx_unicast_byte_count;
243 u64 rx_broadcast_byte_count;
244 u64 rx_multicast_byte_count;
245 u64 rx_unicast_frames;
246 u64 rx_broadcast_frames;
247 u64 rx_multicast_frames;
249 u64 rx_64_byte_frames;
250 u64 rx_65_127_byte_frames;
251 u64 rx_128_255_byte_frames;
252 u64 rx_256_511_bytes_frames;
253 u64 rx_512_1023_byte_frames;
254 u64 rx_1024_1518_byte_frames;
255 u64 rx_greater_1518_byte_frames;
256 u64 eee_rx_lpi_transitions;
259 u64 tx_excess_deferral_errors;
260 u64 tx_carrier_errors;
261 u64 tx_bad_byte_count;
262 u64 tx_single_collisions;
263 u64 tx_multiple_collisions;
264 u64 tx_excessive_collision;
265 u64 tx_late_collisions;
266 u64 tx_unicast_byte_count;
267 u64 tx_broadcast_byte_count;
268 u64 tx_multicast_byte_count;
269 u64 tx_unicast_frames;
270 u64 tx_broadcast_frames;
271 u64 tx_multicast_frames;
273 u64 tx_64_byte_frames;
274 u64 tx_65_127_byte_frames;
275 u64 tx_128_255_byte_frames;
276 u64 tx_256_511_bytes_frames;
277 u64 tx_512_1023_byte_frames;
278 u64 tx_1024_1518_byte_frames;
279 u64 tx_greater_1518_byte_frames;
280 u64 eee_tx_lpi_transitions;
284 static u32 lan78xx_regs[] = {
306 #define PHY_REG_SIZE (32 * sizeof(u32))
310 struct lan78xx_priv {
311 struct lan78xx_net *dev;
313 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
314 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
315 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
316 struct mutex dataport_mutex; /* for dataport access */
317 spinlock_t rfe_ctl_lock; /* for rfe register access */
318 struct work_struct set_multicast;
319 struct work_struct set_vlan;
333 struct skb_data { /* skb->cb is one of these */
335 struct lan78xx_net *dev;
336 enum skb_state state;
342 struct usb_ctrlrequest req;
343 struct lan78xx_net *dev;
346 #define EVENT_TX_HALT 0
347 #define EVENT_RX_HALT 1
348 #define EVENT_RX_MEMORY 2
349 #define EVENT_STS_SPLIT 3
350 #define EVENT_LINK_RESET 4
351 #define EVENT_RX_PAUSED 5
352 #define EVENT_DEV_WAKING 6
353 #define EVENT_DEV_ASLEEP 7
354 #define EVENT_DEV_OPEN 8
355 #define EVENT_STAT_UPDATE 9
358 struct mutex access_lock; /* for stats access */
359 struct lan78xx_statstage saved;
360 struct lan78xx_statstage rollover_count;
361 struct lan78xx_statstage rollover_max;
362 struct lan78xx_statstage64 curr_stat;
365 struct irq_domain_data {
366 struct irq_domain *irqdomain;
368 struct irq_chip *irqchip;
369 irq_flow_handler_t irq_handler;
371 struct mutex irq_lock; /* for irq bus access */
375 struct net_device *net;
376 struct usb_device *udev;
377 struct usb_interface *intf;
382 struct sk_buff_head rxq;
383 struct sk_buff_head txq;
384 struct sk_buff_head done;
385 struct sk_buff_head rxq_pause;
386 struct sk_buff_head txq_pend;
388 struct tasklet_struct bh;
389 struct delayed_work wq;
393 struct urb *urb_intr;
394 struct usb_anchor deferred;
396 struct mutex phy_mutex; /* for phy access */
397 unsigned pipe_in, pipe_out, pipe_intr;
399 u32 hard_mtu; /* count any extra framing */
400 size_t rx_urb_size; /* size for rx urbs */
404 wait_queue_head_t *wait;
405 unsigned char suspend_count;
408 struct timer_list delay;
409 struct timer_list stat_monitor;
411 unsigned long data[5];
418 struct mii_bus *mdiobus;
419 phy_interface_t interface;
422 u8 fc_request_control;
425 struct statstage stats;
427 struct irq_domain_data domain_data;
430 /* define external phy id */
431 #define PHY_LAN8835 (0x0007C130)
432 #define PHY_KSZ9031RNX (0x00221620)
434 /* use ethtool to change the level for any given device */
435 static int msg_level = -1; /* -1 = keep the driver's default verbosity */
436 module_param(msg_level, int, 0);
437 MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read one 32-bit device register via a vendor control transfer.
 * A kmalloc'd bounce buffer is used because USB transfer buffers must be
 * DMA-safe (stack memory is not).
 * NOTE(review): this capture is truncated — the buf NULL check, the
 * success path copying *buf into *data, kfree() and the return statement
 * are not visible here; confirm against the full source.
 */
439 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
441 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); /* DMA-safe bounce buffer */
447 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
448 USB_VENDOR_REQUEST_READ_REGISTER,
449 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
450 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
451 if (likely(ret >= 0)) {
455 netdev_warn(dev->net,
456 "Failed to read register index 0x%08x. ret = %d",
/* Write one 32-bit device register via a vendor control transfer.
 * Mirrors lan78xx_read_reg(): DMA-safe bounce buffer on the send pipe.
 * NOTE(review): truncated capture — NULL check, cpu_to_le32 fixup,
 * kfree() and return are not visible here.
 */
465 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
467 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); /* DMA-safe bounce buffer */
476 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
477 USB_VENDOR_REQUEST_WRITE_REGISTER,
478 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
479 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
480 if (unlikely(ret < 0)) {
481 netdev_warn(dev->net,
482 "Failed to write register index 0x%08x. ret = %d",
/* Pull the full hardware statistics block from the device in a single
 * vendor control transfer, then byte-swap each 32-bit counter in place
 * (device counters are little-endian).
 * NOTE(review): truncated capture — the kmalloc NULL check, the copy
 * into *data, kfree() and return are not visible here.
 */
491 static int lan78xx_read_stats(struct lan78xx_net *dev,
492 struct lan78xx_statstage *data)
496 struct lan78xx_statstage *stats;
500 stats = kmalloc(sizeof(*stats), GFP_KERNEL); /* DMA-safe buffer */
504 ret = usb_control_msg(dev->udev,
505 usb_rcvctrlpipe(dev->udev, 0),
506 USB_VENDOR_REQUEST_GET_STATS,
507 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
512 USB_CTRL_SET_TIMEOUT);
513 if (likely(ret >= 0)) {
/* Counters arrive little-endian; convert each u32 word in place */
516 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
517 le32_to_cpus(&src[i]);
521 netdev_warn(dev->net,
522 "Failed to read stat ret = %d", ret);
/* If the freshly-read 32-bit hardware counter is smaller than the value
 * saved from the previous poll, the counter wrapped: bump the per-member
 * rollover count so 64-bit totals can be reconstructed later.
 */
530 #define check_counter_rollover(struct1, dev_stats, member) { \
531 if (struct1->member < dev_stats.saved.member) \
532 dev_stats.rollover_count.member++; \
/* Detect 32-bit wraparound for every hardware statistics counter and
 * remember the just-read snapshot as the new "saved" baseline for the
 * next poll interval.
 */
535 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
536 struct lan78xx_statstage *stats)
538 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
539 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
540 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
541 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
542 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
543 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
544 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
545 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
546 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
547 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
548 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
549 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
550 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
551 check_counter_rollover(stats, dev->stats, rx_pause_frames);
552 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
553 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
554 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
555 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
556 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
557 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
558 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
559 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
560 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
561 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
562 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
563 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
564 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
565 check_counter_rollover(stats, dev->stats, tx_single_collisions);
566 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
567 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
568 check_counter_rollover(stats, dev->stats, tx_late_collisions);
569 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
570 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
571 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
572 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
573 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
574 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
575 check_counter_rollover(stats, dev->stats, tx_pause_frames);
576 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
577 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
578 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
579 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
580 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
581 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
582 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
583 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
584 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
/* Snapshot becomes the baseline for the next rollover check */
586 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh dev->stats.curr_stat: read the 32-bit hardware counters, fold
 * in the accumulated rollovers, and publish 64-bit totals under
 * stats.access_lock. Holds an autopm reference for the USB I/O.
 * The u32*/u64* walk relies on lan78xx_statstage and
 * lan78xx_statstage64 having members in identical order.
 */
589 static void lan78xx_update_stats(struct lan78xx_net *dev)
591 u32 *p, *count, *max;
594 struct lan78xx_statstage lan78xx_stats;
596 if (usb_autopm_get_interface(dev->intf) < 0)
599 p = (u32 *)&lan78xx_stats;
600 count = (u32 *)&dev->stats.rollover_count;
601 max = (u32 *)&dev->stats.rollover_max;
602 data = (u64 *)&dev->stats.curr_stat;
604 mutex_lock(&dev->stats.access_lock);
606 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
607 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
/* total = current raw value + rollovers * (counter range) */
609 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
610 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
612 mutex_unlock(&dev->stats.access_lock);
614 usb_autopm_put_interface(dev->intf);
617 /* Loop until the read is completed with timeout called with phy_mutex held */
618 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
620 unsigned long start_time = jiffies;
625 ret = lan78xx_read_reg(dev, MII_ACC, &val);
626 if (unlikely(ret < 0))
/* MII engine idle -> done */
629 if (!(val & MII_ACC_MII_BUSY_))
/* Give up after roughly one second of polling */
631 } while (!time_after(jiffies, start_time + HZ));
/* Compose an MII_ACC command word: PHY address, register index,
 * read/write direction, and the BUSY bit that starts the transaction.
 */
636 static inline u32 mii_access(int id, int index, int read)
640 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
641 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
643 ret |= MII_ACC_MII_READ_;
645 ret |= MII_ACC_MII_WRITE_;
646 ret |= MII_ACC_MII_BUSY_;
/* Poll E2P_CMD until the EEPROM controller finishes the current command
 * or flags a timeout; bail out after ~1 s of wall-clock polling.
 */
651 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
653 unsigned long start_time = jiffies;
658 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
659 if (unlikely(ret < 0))
/* Done when controller goes idle or reports its own timeout */
662 if (!(val & E2P_CMD_EPC_BUSY_) ||
663 (val & E2P_CMD_EPC_TIMEOUT_))
665 usleep_range(40, 100);
666 } while (!time_after(jiffies, start_time + HZ));
668 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
669 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Make sure the EEPROM controller is idle before issuing a new command;
 * polls E2P_CMD for up to ~1 s.
 */
676 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
678 unsigned long start_time = jiffies;
683 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
684 if (unlikely(ret < 0))
687 if (!(val & E2P_CMD_EPC_BUSY_))
690 usleep_range(40, 100);
691 } while (!time_after(jiffies, start_time + HZ));
693 netdev_warn(dev->net, "EEPROM is busy")
/* Read 'length' bytes from the external EEPROM one byte at a time,
 * issuing an E2P_CMD READ per byte and collecting the low byte of
 * E2P_DATA. On LAN7800 the EEPROM pins are shared with the LEDs, so LED
 * output is disabled around the access and restored from 'saved'.
 * NOTE(review): truncated capture — variable declarations, error-path
 * gotos, the per-iteration offset advance and the final return are not
 * visible here.
 */
697 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
698 u32 length, u8 *data)
705 /* depends on chip, some EEPROM pins are muxed with LED function.
706 * disable & restore LED function to access EEPROM.
708 ret = lan78xx_read_reg(dev, HW_CFG, &val);
710 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
711 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
712 ret = lan78xx_write_reg(dev, HW_CFG, val);
715 retval = lan78xx_eeprom_confirm_not_busy(dev);
719 for (i = 0; i < length; i++) {
720 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
721 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
722 ret = lan78xx_write_reg(dev, E2P_CMD, val);
723 if (unlikely(ret < 0)) {
728 retval = lan78xx_wait_eeprom(dev);
732 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
733 if (unlikely(ret < 0)) {
/* Each read returns one byte in the LSB of E2P_DATA */
738 data[i] = val & 0xFF;
/* Restore the original LED configuration */
744 if (dev->chipid == ID_REV_CHIP_ID_7800_)
745 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Validated EEPROM read: only proceed if byte 0 carries the
 * EEPROM_INDICATOR signature, i.e. a programmed EEPROM is present.
 */
750 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
751 u32 length, u8 *data)
756 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
757 if ((ret == 0) && (sig == EEPROM_INDICATOR))
758 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write 'length' bytes to the external EEPROM: first an EWEN
 * (write/erase enable) command, then one E2P_DATA fill + WRITE command
 * per byte, waiting for completion after each. LED pins are borrowed on
 * LAN7800 as in lan78xx_read_raw_eeprom() and restored from 'saved'.
 * NOTE(review): truncated capture — declarations, error paths, the data
 * byte load into 'val', offset advance and return are not visible here.
 */
765 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
766 u32 length, u8 *data)
773 /* depends on chip, some EEPROM pins are muxed with LED function.
774 * disable & restore LED function to access EEPROM.
776 ret = lan78xx_read_reg(dev, HW_CFG, &val);
778 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
779 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
780 ret = lan78xx_write_reg(dev, HW_CFG, val);
783 retval = lan78xx_eeprom_confirm_not_busy(dev);
787 /* Issue write/erase enable command */
788 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
789 ret = lan78xx_write_reg(dev, E2P_CMD, val);
790 if (unlikely(ret < 0)) {
795 retval = lan78xx_wait_eeprom(dev);
799 for (i = 0; i < length; i++) {
800 /* Fill data register */
802 ret = lan78xx_write_reg(dev, E2P_DATA, val);
808 /* Send "write" command */
809 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
810 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
811 ret = lan78xx_write_reg(dev, E2P_CMD, val);
817 retval = lan78xx_wait_eeprom(dev);
/* Restore the original LED configuration */
826 if (dev->chipid == ID_REV_CHIP_ID_7800_)
827 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read 'length' bytes from on-chip OTP memory. Powers the OTP block up
 * if it is in power-down, then per byte: program ADDR1/ADDR2 with the
 * split 16-bit address, kick a READ via OTP_CMD_GO, poll OTP_STATUS
 * until not busy (~1 s cap), and collect the byte from OTP_RD_DATA.
 * NOTE(review): truncated capture — declarations, timeout error returns
 * and the final return are not visible here.
 */
832 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
833 u32 length, u8 *data)
838 unsigned long timeout;
840 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
842 if (buf & OTP_PWR_DN_PWRDN_N_) {
843 /* clear it and wait to be cleared */
844 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
846 timeout = jiffies + HZ;
849 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
850 if (time_after(jiffies, timeout)) {
851 netdev_warn(dev->net,
852 "timeout on OTP_PWR_DN");
855 } while (buf & OTP_PWR_DN_PWRDN_N_);
858 for (i = 0; i < length; i++) {
/* Address is split: high bits in ADDR1, low bits in ADDR2 */
859 ret = lan78xx_write_reg(dev, OTP_ADDR1,
860 ((offset + i) >> 8) & OTP_ADDR1_15_11);
861 ret = lan78xx_write_reg(dev, OTP_ADDR2,
862 ((offset + i) & OTP_ADDR2_10_3));
864 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
865 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
867 timeout = jiffies + HZ;
870 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
871 if (time_after(jiffies, timeout)) {
872 netdev_warn(dev->net,
873 "timeout on OTP_STATUS");
876 } while (buf & OTP_STATUS_BUSY_);
878 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
880 data[i] = (u8)(buf & 0xFF);
/* Program 'length' bytes into on-chip OTP memory. Same power-up dance
 * as lan78xx_read_raw_otp(), then BYTE program mode is selected and each
 * byte is written via OTP_PRGM_DATA + program/verify command, polling
 * OTP_STATUS for completion. OTP writes are one-time — bits cannot be
 * cleared once set.
 * NOTE(review): truncated capture — declarations, timeout error returns
 * and the final return are not visible here.
 */
886 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
887 u32 length, u8 *data)
892 unsigned long timeout;
894 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
896 if (buf & OTP_PWR_DN_PWRDN_N_) {
897 /* clear it and wait to be cleared */
898 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
900 timeout = jiffies + HZ;
903 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
904 if (time_after(jiffies, timeout)) {
905 netdev_warn(dev->net,
906 "timeout on OTP_PWR_DN completion");
909 } while (buf & OTP_PWR_DN_PWRDN_N_);
912 /* set to BYTE program mode */
913 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
915 for (i = 0; i < length; i++) {
916 ret = lan78xx_write_reg(dev, OTP_ADDR1,
917 ((offset + i) >> 8) & OTP_ADDR1_15_11);
918 ret = lan78xx_write_reg(dev, OTP_ADDR2,
919 ((offset + i) & OTP_ADDR2_10_3));
920 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
921 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
922 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
924 timeout = jiffies + HZ;
927 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
928 if (time_after(jiffies, timeout)) {
929 netdev_warn(dev->net,
930 "Timeout on OTP_STATUS completion");
933 } while (buf & OTP_STATUS_BUSY_);
/* Validated OTP read: check the indicator byte at offset 0 to detect
 * which OTP image is active before reading the requested range.
 * NOTE(review): the offset adjustment applied for each indicator value
 * is not visible in this capture.
 */
939 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
940 u32 length, u8 *data)
945 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
948 if (sig == OTP_INDICATOR_1)
950 else if (sig == OTP_INDICATOR_2)
955 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL up to 100 times (with short sleeps) for the dataport
 * ready flag; warns and fails if the port never becomes ready.
 */
961 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
965 for (i = 0; i < 100; i++) {
968 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
969 if (unlikely(ret < 0))
/* DPRDY set means the previous dataport op completed */
972 if (dp_sel & DP_SEL_DPRDY_)
975 usleep_range(40, 100);
978 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write a buffer into one of the device's internal RAM banks through
 * the indirect dataport interface: select the RAM bank in DP_SEL, then
 * per word set DP_ADDR, DP_DATA and issue DP_CMD_WRITE_, waiting for
 * ready between operations. Serialized by pdata->dataport_mutex and
 * holds a USB autopm reference for the duration.
 */
983 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
984 u32 addr, u32 length, u32 *buf)
986 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
990 if (usb_autopm_get_interface(dev->intf) < 0)
993 mutex_lock(&pdata->dataport_mutex);
995 ret = lan78xx_dataport_wait_not_busy(dev);
999 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
/* Select the target RAM bank, preserving other DP_SEL bits */
1001 dp_sel &= ~DP_SEL_RSEL_MASK_;
1002 dp_sel |= ram_select;
1003 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1005 for (i = 0; i < length; i++) {
1006 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1008 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1010 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1012 ret = lan78xx_dataport_wait_not_busy(dev);
1018 mutex_unlock(&pdata->dataport_mutex);
1019 usb_autopm_put_interface(dev->intf);
/* Pack a MAC address into the cached perfect-filter table entry at
 * 'index' (index 0 is reserved for the device's own address): the low
 * register word gets bytes 0-3, the high word gets bytes 4-5 plus the
 * VALID and TYPE_DST flags. Only updates the shadow table — the actual
 * MAF registers are written by the deferred multicast work.
 */
1024 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1025 int index, u8 addr[ETH_ALEN])
1029 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1031 temp = addr[2] | (temp << 8);
1032 temp = addr[1] | (temp << 8);
1033 temp = addr[0] | (temp << 8);
1034 pdata->pfilter_table[index][1] = temp;
1036 temp = addr[4] | (temp << 8);
1037 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1038 pdata->pfilter_table[index][0] = temp;
1042 /* returns hash bit number for given MAC address */
1043 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
/* Top 9 bits of the Ethernet CRC select one of 512 hash-table bits */
1045 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler that flushes the cached RX filter state to hardware:
 * uploads the multicast hash table through the dataport, rewrites every
 * perfect-filter (MAF) register pair, then commits RFE_CTL. Deferred to
 * process context because register access can sleep (USB I/O), which
 * ndo_set_rx_mode's atomic context cannot.
 */
1048 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1050 struct lan78xx_priv *pdata =
1051 container_of(param, struct lan78xx_priv, set_multicast);
1052 struct lan78xx_net *dev = pdata->dev;
1056 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1059 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1060 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
/* Clear MAF_HI first so the entry is invalid while MAF_LO changes */
1062 for (i = 1; i < NUM_OF_MAF; i++) {
1063 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1064 ret = lan78xx_write_reg(dev, MAF_LO(i),
1065 pdata->pfilter_table[i][1]);
1066 ret = lan78xx_write_reg(dev, MAF_HI(i),
1067 pdata->pfilter_table[i][0]);
1070 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode: rebuild the cached RX filter configuration (promisc /
 * allmulti flags, perfect filters, multicast hash) under rfe_ctl_lock,
 * then schedule the deferred work to push it to hardware. Runs in atomic
 * context, hence no register I/O here.
 */
1073 static void lan78xx_set_multicast(struct net_device *netdev)
1075 struct lan78xx_net *dev = netdev_priv(netdev);
1076 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1077 unsigned long flags;
1080 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
/* Start from a clean slate: drop unicast/multicast filter modes */
1082 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1083 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1085 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1086 pdata->mchash_table[i] = 0;
1087 /* pfilter_table[0] has own HW address */
1088 for (i = 1; i < NUM_OF_MAF; i++) {
1089 pdata->pfilter_table[i][0] =
1090 pdata->pfilter_table[i][1] = 0;
/* Broadcast reception is always on */
1093 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1095 if (dev->net->flags & IFF_PROMISC) {
1096 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1097 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1099 if (dev->net->flags & IFF_ALLMULTI) {
1100 netif_dbg(dev, drv, dev->net,
1101 "receive all multicast enabled");
1102 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1106 if (netdev_mc_count(dev->net)) {
1107 struct netdev_hw_addr *ha;
1110 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1112 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1115 netdev_for_each_mc_addr(ha, netdev) {
1116 /* set first 32 into Perfect Filter */
1118 lan78xx_set_addr_filter(pdata, i, ha->addr);
/* Overflow entries fall back to the 512-bit hash filter */
1120 u32 bitnum = lan78xx_hash(ha->addr);
1122 pdata->mchash_table[bitnum / 32] |=
1123 (1 << (bitnum % 32));
1124 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1130 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1132 /* defer register writes to a sleepable context */
1133 schedule_work(&pdata->set_multicast);
/* Program MAC flow control after link-up: resolve the pause capability
 * (from autoneg advertisement or the user-forced request), set the FLOW
 * register's TX/RX pause enables, and program FCT_FLOW thresholds first
 * as the hardware requires.
 * NOTE(review): truncated capture — the USB-speed-dependent fct_flow
 * threshold values and the return are not visible here.
 */
1136 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1137 u16 lcladv, u16 rmtadv)
1139 u32 flow = 0, fct_flow = 0;
1143 if (dev->fc_autoneg)
1144 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1146 cap = dev->fc_request_control;
/* Low 16 bits of FLOW carry the pause time when TX pause is on */
1148 if (cap & FLOW_CTRL_TX)
1149 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1151 if (cap & FLOW_CTRL_RX)
1152 flow |= FLOW_CR_RX_FCEN_;
1154 if (dev->udev->speed == USB_SPEED_SUPER)
1156 else if (dev->udev->speed == USB_SPEED_HIGH)
1159 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1160 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1161 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1163 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1165 /* threshold value should be set before enabling flow */
1166 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link change (run from the deferred kevent work): clear
 * the PHY interrupt status, sample link state under phydev->lock, and
 * either tear down (link lost: stop the stats timer) or bring up (link
 * gained: tune USB U1/U2 link power states by speed, resolve pause
 * advertisement, program flow control, restart stats polling and kick
 * the RX/TX tasklet).
 * NOTE(review): truncated capture — several error returns, the MAC_CR
 * bit manipulation on link-down and parts of the U1/U2 branches are not
 * visible here.
 */
1171 static int lan78xx_link_reset(struct lan78xx_net *dev)
1173 struct phy_device *phydev = dev->net->phydev;
1174 struct ethtool_link_ksettings ecmd;
1175 int ladv, radv, ret, link;
1178 /* clear LAN78xx interrupt status */
1179 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1180 if (unlikely(ret < 0))
1183 mutex_lock(&phydev->lock);
1184 phy_read_status(phydev);
1185 link = phydev->link;
1186 mutex_unlock(&phydev->lock);
1188 if (!link && dev->link_on) {
1189 dev->link_on = false;
1192 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1193 if (unlikely(ret < 0))
1196 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1197 if (unlikely(ret < 0))
/* No link: no point polling hardware statistics */
1200 del_timer(&dev->stat_monitor);
1201 } else if (link && !dev->link_on) {
1202 dev->link_on = true;
1204 phy_ethtool_ksettings_get(phydev, &ecmd);
1206 if (dev->udev->speed == USB_SPEED_SUPER) {
1207 if (ecmd.base.speed == 1000) {
/* Gigabit: U2 exit latency is too high, keep only U1 */
1209 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1210 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1211 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1213 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1214 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1215 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1217 /* enable U1 & U2 */
1218 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1219 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1220 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1221 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1225 ladv = phy_read(phydev, MII_ADVERTISE);
1229 radv = phy_read(phydev, MII_LPA);
1233 netif_dbg(dev, link, dev->net,
1234 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1235 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1237 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1240 if (!timer_pending(&dev->stat_monitor)) {
1242 mod_timer(&dev->stat_monitor,
1243 jiffies + STAT_UPDATE_TIMER);
1246 tasklet_schedule(&dev->bh);
1252 /* some work can't be done in tasklets, so we use keventd
1254 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1255 * but tasklet_schedule() doesn't. hope the failure is rare.
1257 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
/* Record which event is pending, then kick the workqueue */
1259 set_bit(work, &dev->flags);
1260 if (!schedule_delayed_work(&dev->wq, 0))
1261 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint URB completion handler: decode the 4-byte
 * little-endian status word. A PHY interrupt defers link handling to the
 * kevent work and, when a PHY irq domain is mapped, re-dispatches it
 * through generic_handle_irq() with local interrupts disabled.
 */
1264 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1268 if (urb->actual_length != 4) {
1269 netdev_warn(dev->net,
1270 "unexpected urb length %d", urb->actual_length);
1274 memcpy(&intdata, urb->transfer_buffer, 4);
1275 le32_to_cpus(&intdata);
1277 if (intdata & INT_ENP_PHY_INT) {
1278 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1279 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1281 if (dev->domain_data.phyirq > 0) {
1282 local_irq_disable();
1283 generic_handle_irq(dev->domain_data.phyirq);
1287 netdev_warn(dev->net,
1288 "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool get_eeprom_len: fixed EEPROM size exposed to userspace */
1291 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1293 return MAX_EEPROM_SIZE;
/* ethtool get_eeprom: read the raw EEPROM range requested by userspace,
 * holding a USB autopm reference so the device is awake for the I/O.
 */
1296 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1297 struct ethtool_eeprom *ee, u8 *data)
1299 struct lan78xx_net *dev = netdev_priv(netdev);
1302 ret = usb_autopm_get_interface(dev->intf);
1306 ee->magic = LAN78XX_EEPROM_MAGIC;
1308 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1310 usb_autopm_put_interface(dev->intf);
/* ethtool set_eeprom: the magic value selects the backing store —
 * EEPROM writes for LAN78XX_EEPROM_MAGIC, OTP writes for
 * LAN78XX_OTP_MAGIC (and only when the payload starts with a valid OTP
 * indicator at offset 0, since OTP programming is irreversible).
 */
1315 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1316 struct ethtool_eeprom *ee, u8 *data)
1318 struct lan78xx_net *dev = netdev_priv(netdev);
1321 ret = usb_autopm_get_interface(dev->intf);
1325 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1326 * to load data from EEPROM
1328 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1329 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1330 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1331 (ee->offset == 0) &&
1333 (data[0] == OTP_INDICATOR_1))
1334 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1336 usb_autopm_put_interface(dev->intf);
/* ethtool get_strings: export the statistics name table */
1341 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1344 if (stringset == ETH_SS_STATS)
1345 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
/* ethtool get_sset_count: number of exported statistics strings */
1348 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1350 if (sset == ETH_SS_STATS)
1351 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool get_ethtool_stats: refresh the 64-bit totals from hardware,
 * then copy them out under the stats lock. Layout of curr_stat must
 * match lan78xx_gstrings order.
 */
1356 static void lan78xx_get_stats(struct net_device *netdev,
1357 struct ethtool_stats *stats, u64 *data)
1359 struct lan78xx_net *dev = netdev_priv(netdev);
1361 lan78xx_update_stats(dev);
1363 mutex_lock(&dev->stats.access_lock);
1364 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1365 mutex_unlock(&dev->stats.access_lock);
/* ethtool get_wol: report Wake-on-LAN capability. Only advertises WoL
 * when USB_CFG0 shows remote wakeup support; otherwise (and on register
 * read failure) the not-visible else path presumably reports none —
 * NOTE(review): confirm against the full source, lines are missing here.
 */
1368 static void lan78xx_get_wol(struct net_device *netdev,
1369 struct ethtool_wolinfo *wol)
1371 struct lan78xx_net *dev = netdev_priv(netdev);
1374 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1376 if (usb_autopm_get_interface(dev->intf) < 0)
1379 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1380 if (unlikely(ret < 0)) {
1384 if (buf & USB_CFG_RMT_WKP_) {
1385 wol->supported = WAKE_ALL;
1386 wol->wolopts = pdata->wol;
1393 usb_autopm_put_interface(dev->intf);
/* ethtool .set_wol: store requested WoL options, arm USB wakeup on the
 * udev, and forward the request to the PHY. Rejects option bits
 * outside WAKE_ALL.
 */
1396 static int lan78xx_set_wol(struct net_device *netdev,
1397 struct ethtool_wolinfo *wol)
1399 struct lan78xx_net *dev = netdev_priv(netdev);
1400 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1403 ret = usb_autopm_get_interface(dev->intf);
1407 if (wol->wolopts & ~WAKE_ALL)
1410 pdata->wol = wol->wolopts;
/* any non-zero wolopts enables USB remote wakeup for this device */
1412 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1414 phy_ethtool_set_wol(netdev->phydev, wol);
1416 usb_autopm_put_interface(dev->intf);
/* ethtool .get_eee: combine PHY-level EEE state (phy_ethtool_get_eee)
 * with the MAC's EEE enable bit in MAC_CR, and report the TX LPI
 * request delay as the LPI timer.
 */
1421 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1423 struct lan78xx_net *dev = netdev_priv(net);
1424 struct phy_device *phydev = net->phydev;
1428 ret = usb_autopm_get_interface(dev->intf);
1432 ret = phy_ethtool_get_eee(phydev, edata);
1436 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1437 if (buf & MAC_CR_EEE_EN_) {
1438 edata->eee_enabled = true;
/* EEE is active only if the link partner also advertises it */
1439 edata->eee_active = !!(edata->advertised &
1440 edata->lp_advertised);
1441 edata->tx_lpi_enabled = true;
1442 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1443 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1444 edata->tx_lpi_timer = buf;
1446 edata->eee_enabled = false;
1447 edata->eee_active = false;
1448 edata->tx_lpi_enabled = false;
1449 edata->tx_lpi_timer = 0;
1454 usb_autopm_put_interface(dev->intf);
/* ethtool .set_eee: enable path sets MAC_CR_EEE_EN_, pushes the request
 * to the PHY and programs the LPI timer; disable path just clears the
 * MAC enable bit.
 */
1459 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1461 struct lan78xx_net *dev = netdev_priv(net);
1465 ret = usb_autopm_get_interface(dev->intf);
1469 if (edata->eee_enabled) {
1470 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1471 buf |= MAC_CR_EEE_EN_;
1472 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1474 phy_ethtool_set_eee(net->phydev, edata);
/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same uSec unit */
1476 buf = (u32)edata->tx_lpi_timer;
1477 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1479 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1480 buf &= ~MAC_CR_EEE_EN_;
1481 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1484 usb_autopm_put_interface(dev->intf);
/* ethtool .get_link: force a fresh PHY status read (under the phydev
 * lock) and return the resulting link state.
 */
1489 static u32 lan78xx_get_link(struct net_device *net)
1493 mutex_lock(&net->phydev->lock);
1494 phy_read_status(net->phydev);
1495 link = net->phydev->link;
1496 mutex_unlock(&net->phydev->lock);
1501 static void lan78xx_get_drvinfo(struct net_device *net,
1502 struct ethtool_drvinfo *info)
1504 struct lan78xx_net *dev = netdev_priv(net);
1506 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1507 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
/* ethtool .get_msglevel: return the driver's netif message-enable mask. */
1510 static u32 lan78xx_get_msglevel(struct net_device *net)
1512 struct lan78xx_net *dev = netdev_priv(net);
1514 return dev->msg_enable;
/* ethtool .set_msglevel: set the driver's netif message-enable mask. */
1517 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1519 struct lan78xx_net *dev = netdev_priv(net);
1521 dev->msg_enable = level;
/* ethtool .get_link_ksettings: delegate to phylib under USB autopm. */
1524 static int lan78xx_get_link_ksettings(struct net_device *net,
1525 struct ethtool_link_ksettings *cmd)
1527 struct lan78xx_net *dev = netdev_priv(net);
1528 struct phy_device *phydev = net->phydev;
1531 ret = usb_autopm_get_interface(dev->intf);
1535 phy_ethtool_ksettings_get(phydev, cmd);
1537 usb_autopm_put_interface(dev->intf);
/* ethtool .set_link_ksettings: apply speed/duplex via phylib. When
 * autoneg is off, briefly toggle BMCR loopback to force the link down
 * so the partner renegotiates with the new forced settings.
 */
1542 static int lan78xx_set_link_ksettings(struct net_device *net,
1543 const struct ethtool_link_ksettings *cmd)
1545 struct lan78xx_net *dev = netdev_priv(net);
1546 struct phy_device *phydev = net->phydev;
1550 ret = usb_autopm_get_interface(dev->intf);
1554 /* change speed & duplex */
1555 ret = phy_ethtool_ksettings_set(phydev, cmd);
1557 if (!cmd->base.autoneg) {
1558 /* force link down */
1559 temp = phy_read(phydev, MII_BMCR);
1560 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
/* restore BMCR to drop loopback again; a delay between the two
 * writes is elided in this view — confirm against full source
 */
1562 phy_write(phydev, MII_BMCR, temp);
1565 usb_autopm_put_interface(dev->intf);
/* ethtool .get_pauseparam: report flow-control autoneg state and the
 * driver's requested RX/TX pause bits.
 */
1570 static void lan78xx_get_pause(struct net_device *net,
1571 struct ethtool_pauseparam *pause)
1573 struct lan78xx_net *dev = netdev_priv(net);
1574 struct phy_device *phydev = net->phydev;
1575 struct ethtool_link_ksettings ecmd;
1577 phy_ethtool_ksettings_get(phydev, &ecmd);
1579 pause->autoneg = dev->fc_autoneg;
1581 if (dev->fc_request_control & FLOW_CTRL_TX)
1582 pause->tx_pause = 1;
1584 if (dev->fc_request_control & FLOW_CTRL_RX)
1585 pause->rx_pause = 1;
/* ethtool .set_pauseparam: record requested RX/TX pause. Pause autoneg
 * is rejected when link autoneg is off. With autoneg on, the pause
 * request is folded into the PHY's advertised link modes and
 * renegotiated.
 */
1588 static int lan78xx_set_pause(struct net_device *net,
1589 struct ethtool_pauseparam *pause)
1591 struct lan78xx_net *dev = netdev_priv(net);
1592 struct phy_device *phydev = net->phydev;
1593 struct ethtool_link_ksettings ecmd;
1596 phy_ethtool_ksettings_get(phydev, &ecmd);
/* pause autoneg requires link autoneg; error path elided here */
1598 if (pause->autoneg && !ecmd.base.autoneg) {
1603 dev->fc_request_control = 0;
1604 if (pause->rx_pause)
1605 dev->fc_request_control |= FLOW_CTRL_RX;
1607 if (pause->tx_pause)
1608 dev->fc_request_control |= FLOW_CTRL_TX;
1610 if (ecmd.base.autoneg) {
1614 ethtool_convert_link_mode_to_legacy_u32(
1615 &advertising, ecmd.link_modes.advertising);
/* replace old pause advertisement with the new request */
1617 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1618 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1619 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1621 ethtool_convert_legacy_u32_to_link_mode(
1622 ecmd.link_modes.advertising, advertising);
1624 phy_ethtool_ksettings_set(phydev, &ecmd);
1627 dev->fc_autoneg = pause->autoneg;
/* ethtool .get_regs_len: size of the MAC register dump, plus the PHY
 * register block when a PHY is attached.
 */
1634 static int lan78xx_get_regs_len(struct net_device *netdev)
1636 if (!netdev->phydev)
1637 return (sizeof(lan78xx_regs));
1639 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
/* ethtool .get_regs: dump all MAC registers listed in lan78xx_regs[],
 * then (if a PHY is attached) the 32 standard PHY registers appended
 * after them. Layout must match lan78xx_get_regs_len().
 */
1643 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1648 struct lan78xx_net *dev = netdev_priv(netdev);
1650 /* Read Device/MAC registers */
1651 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1652 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1654 if (!netdev->phydev)
1657 /* Read PHY registers */
/* i continues past the MAC block so PHY regs land after it */
1658 for (j = 0; j < 32; i++, j++)
1659 data[i] = phy_read(netdev->phydev, j);
/* ethtool operations table wiring the handlers above into the core. */
1662 static const struct ethtool_ops lan78xx_ethtool_ops = {
1663 .get_link = lan78xx_get_link,
1664 .nway_reset = phy_ethtool_nway_reset,
1665 .get_drvinfo = lan78xx_get_drvinfo,
1666 .get_msglevel = lan78xx_get_msglevel,
1667 .set_msglevel = lan78xx_set_msglevel,
1668 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1669 .get_eeprom = lan78xx_ethtool_get_eeprom,
1670 .set_eeprom = lan78xx_ethtool_set_eeprom,
1671 .get_ethtool_stats = lan78xx_get_stats,
1672 .get_sset_count = lan78xx_get_sset_count,
1673 .get_strings = lan78xx_get_strings,
1674 .get_wol = lan78xx_get_wol,
1675 .set_wol = lan78xx_set_wol,
1676 .get_eee = lan78xx_get_eee,
1677 .set_eee = lan78xx_set_eee,
1678 .get_pauseparam = lan78xx_get_pause,
1679 .set_pauseparam = lan78xx_set_pause,
1680 .get_link_ksettings = lan78xx_get_link_ksettings,
1681 .set_link_ksettings = lan78xx_set_link_ksettings,
1682 .get_regs_len = lan78xx_get_regs_len,
1683 .get_regs = lan78xx_get_regs,
/* ndo_do_ioctl: forward MII ioctls to the PHY; only valid while the
 * interface is up.
 */
1686 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1688 if (!netif_running(netdev))
1691 return phy_mii_ioctl(netdev->phydev, rq, cmd);
/* Establish the device MAC address. Priority order:
 *   1. address already programmed in RX_ADDRL/RX_ADDRH (if valid)
 *   2. platform/Device Tree supplied address
 *   3. EEPROM or OTP stored address
 *   4. random locally-administered address as a last resort
 * The chosen address is written back to the RX_ADDR and MAF(0)
 * perfect-filter registers and copied into the netdev.
 */
1694 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1696 u32 addr_lo, addr_hi;
1700 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1701 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* registers hold the address little-endian: unpack into byte array */
1703 addr[0] = addr_lo & 0xFF;
1704 addr[1] = (addr_lo >> 8) & 0xFF;
1705 addr[2] = (addr_lo >> 16) & 0xFF;
1706 addr[3] = (addr_lo >> 24) & 0xFF;
1707 addr[4] = addr_hi & 0xFF;
1708 addr[5] = (addr_hi >> 8) & 0xFF;
1710 if (!is_valid_ether_addr(addr)) {
1711 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1712 /* valid address present in Device Tree */
1713 netif_dbg(dev, ifup, dev->net,
1714 "MAC address read from Device Tree");
1715 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1716 ETH_ALEN, addr) == 0) ||
1717 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1718 ETH_ALEN, addr) == 0)) &&
1719 is_valid_ether_addr(addr)) {
1720 /* eeprom values are valid so use them */
1721 netif_dbg(dev, ifup, dev->net,
1722 "MAC address read from EEPROM");
1724 /* generate random MAC */
1725 eth_random_addr(addr);
1726 netif_dbg(dev, ifup, dev->net,
1727 "MAC address set to random addr");
/* repack and program the hardware receive-address registers */
1730 addr_lo = addr[0] | (addr[1] << 8) |
1731 (addr[2] << 16) | (addr[3] << 24);
1732 addr_hi = addr[4] | (addr[5] << 8);
1734 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1735 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* also install it in perfect-filter slot 0 so unicast RX matches */
1738 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1739 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1741 ether_addr_copy(dev->net->dev_addr, addr);
1744 /* MDIO read and write wrappers for phylib */
1744 /* MDIO read and write wrappers for phylib */
/* mii_bus .read: perform an MII register read through the MII_ACC /
 * MII_DATA register pair, serialized by phy_mutex and holding a USB
 * autopm reference for the duration.
 */
1745 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1747 struct lan78xx_net *dev = bus->priv;
1751 ret = usb_autopm_get_interface(dev->intf);
1755 mutex_lock(&dev->phy_mutex);
1757 /* confirm MII not busy */
1758 ret = lan78xx_phy_wait_not_busy(dev);
1762 /* set the address, index & direction (read from PHY) */
1763 addr = mii_access(phy_id, idx, MII_READ);
1764 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1766 ret = lan78xx_phy_wait_not_busy(dev);
1770 ret = lan78xx_read_reg(dev, MII_DATA, &val);
/* MDIO data is 16 bits wide */
1772 ret = (int)(val & 0xFFFF);
1775 mutex_unlock(&dev->phy_mutex);
1776 usb_autopm_put_interface(dev->intf);
/* mii_bus .write: mirror of lan78xx_mdiobus_read() — write MII_DATA
 * first, then trigger the transaction via MII_ACC, under phy_mutex and
 * USB autopm.
 */
1781 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1784 struct lan78xx_net *dev = bus->priv;
1788 ret = usb_autopm_get_interface(dev->intf);
1792 mutex_lock(&dev->phy_mutex);
1794 /* confirm MII not busy */
1795 ret = lan78xx_phy_wait_not_busy(dev);
1800 ret = lan78xx_write_reg(dev, MII_DATA, val);
1802 /* set the address, index & direction (write to PHY) */
1803 addr = mii_access(phy_id, idx, MII_WRITE);
1804 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1806 ret = lan78xx_phy_wait_not_busy(dev);
1811 mutex_unlock(&dev->phy_mutex);
1812 usb_autopm_put_interface(dev->intf);
/* Allocate and register the MDIO bus. The PHY scan mask depends on the
 * chip: LAN7800/7850 have a fixed internal PHY at address 1, while
 * LAN7801 scans external PHY addresses. An optional "mdio" DT child
 * node is honored via of_mdiobus_register(). On registration failure
 * the bus is freed (error path partially elided in this view).
 */
1816 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1818 struct device_node *node;
1821 dev->mdiobus = mdiobus_alloc();
1822 if (!dev->mdiobus) {
1823 netdev_err(dev->net, "can't allocate MDIO bus\n");
1827 dev->mdiobus->priv = (void *)dev;
1828 dev->mdiobus->read = lan78xx_mdiobus_read;
1829 dev->mdiobus->write = lan78xx_mdiobus_write;
1830 dev->mdiobus->name = "lan78xx-mdiobus";
1831 dev->mdiobus->parent = &dev->udev->dev;
/* bus id derived from USB topology so it is unique per device */
1833 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1834 dev->udev->bus->busnum, dev->udev->devnum);
1836 switch (dev->chipid) {
1837 case ID_REV_CHIP_ID_7800_:
1838 case ID_REV_CHIP_ID_7850_:
1839 /* set to internal PHY id */
1840 dev->mdiobus->phy_mask = ~(1 << 1);
1842 case ID_REV_CHIP_ID_7801_:
1843 /* scan thru PHYAD[2..0] */
1844 dev->mdiobus->phy_mask = ~(0xFF);
1848 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1849 ret = of_mdiobus_register(dev->mdiobus, node);
1853 netdev_err(dev->net, "can't register MDIO bus\n");
1857 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* error path: release the allocated bus */
1860 mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
1864 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1866 mdiobus_unregister(dev->mdiobus);
1867 mdiobus_free(dev->mdiobus);
/* phylib link-change callback. Contains a hardware workaround: at
 * forced 100M the PHY can latch the wrong mode after a cable swap, so
 * the speed is bounced through 10M before re-forcing 100M, with the
 * PHY interrupt masked (and its stale status cleared) around the
 * toggle so the bounce does not fire spurious link events.
 */
1870 static void lan78xx_link_status_change(struct net_device *net)
1872 struct phy_device *phydev = net->phydev;
1875 /* At forced 100 F/H mode, chip may fail to set mode correctly
1876 * when cable is switched between long(~50+m) and short one.
1877 * As workaround, set to 10 before setting to 100
1878 * at forced 100 F/H mode.
1880 if (!phydev->autoneg && (phydev->speed == 100)) {
1881 /* disable phy interrupt */
1882 temp = phy_read(phydev, LAN88XX_INT_MASK);
1883 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1884 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1886 temp = phy_read(phydev, MII_BMCR);
1887 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1888 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1889 temp |= BMCR_SPEED100;
1890 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1892 /* clear pending interrupt generated while workaround */
1893 temp = phy_read(phydev, LAN88XX_INT_STS);
1895 /* enable phy interrupt back */
1896 temp = phy_read(phydev, LAN88XX_INT_MASK);
1897 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1898 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
/* irq_domain .map: bind a virq to this driver's irqchip and handler,
 * stash the per-device irq data, and keep it out of autoprobing.
 */
1902 static int irq_map(struct irq_domain *d, unsigned int irq,
1903 irq_hw_number_t hwirq)
1905 struct irq_domain_data *data = d->host_data;
1907 irq_set_chip_data(irq, data);
1908 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1909 irq_set_noprobe(irq);
/* irq_domain .unmap: undo irq_map() — detach chip, handler and data. */
1914 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1916 irq_set_chip_and_handler(irq, NULL, NULL);
1917 irq_set_chip_data(irq, NULL);
/* irq-domain ops for the device's interrupt endpoint; member
 * initializers (.map/.unmap) are elided in this view.
 */
1920 static const struct irq_domain_ops chip_domain_ops = {
/* irqchip .irq_mask: clear the hwirq's bit in the cached enable mask;
 * the hardware write happens later in irq_bus_sync_unlock().
 */
1925 static void lan78xx_irq_mask(struct irq_data *irqd)
1927 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1929 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
/* irqchip .irq_unmask: set the hwirq's bit in the cached enable mask;
 * flushed to hardware in irq_bus_sync_unlock().
 */
1932 static void lan78xx_irq_unmask(struct irq_data *irqd)
1934 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1936 data->irqenable |= BIT(irqd_to_hwirq(irqd));
/* irqchip .irq_bus_lock: taken before mask/unmask; released (with the
 * USB register write) in irq_bus_sync_unlock().
 */
1939 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1941 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1943 mutex_lock(&data->irq_lock);
/* irqchip .irq_bus_sync_unlock: push the cached enable mask to the
 * INT_EP_CTL register (USB access must sleep, hence doing it here
 * rather than in the atomic mask/unmask callbacks), then drop the
 * bus lock.
 */
1946 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1948 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1949 struct lan78xx_net *dev =
1950 container_of(data, struct lan78xx_net, domain_data);
1954 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1955 * are only two callbacks executed in non-atomic contex.
/* only write when the hardware value actually changed */
1957 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1958 if (buf != data->irqenable)
1959 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1961 mutex_unlock(&data->irq_lock);
/* irqchip for interrupts delivered via the USB interrupt endpoint;
 * uses the slow-bus (bus_lock/bus_sync_unlock) pattern because
 * register access sleeps.
 */
1964 static struct irq_chip lan78xx_irqchip = {
1965 .name = "lan78xx-irqs",
1966 .irq_mask = lan78xx_irq_mask,
1967 .irq_unmask = lan78xx_irq_unmask,
1968 .irq_bus_lock = lan78xx_irq_bus_lock,
1969 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
/* Create a simple irq domain over the device's interrupt sources and
 * map the PHY interrupt (INT_EP_PHY) so phylib can use a real virq.
 * The current INT_EP_CTL value seeds the cached enable mask.
 */
1972 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1974 struct device_node *of_node;
1975 struct irq_domain *irqdomain;
1976 unsigned int irqmap = 0;
1980 of_node = dev->udev->dev.parent->of_node;
1982 mutex_init(&dev->domain_data.irq_lock);
1984 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1985 dev->domain_data.irqenable = buf;
1987 dev->domain_data.irqchip = &lan78xx_irqchip;
1988 dev->domain_data.irq_handler = handle_simple_irq;
1990 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1991 &chip_domain_ops, &dev->domain_data);
1993 /* create mapping for PHY interrupt */
1994 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
/* mapping failed: tear the domain back down (error path elided) */
1996 irq_domain_remove(irqdomain);
2005 dev->domain_data.irqdomain = irqdomain;
2006 dev->domain_data.phyirq = irqmap;
/* Dispose of the PHY irq mapping and remove the irq domain; safe to
 * call when setup never completed (phyirq == 0 / NULL domain).
 */
2011 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2013 if (dev->domain_data.phyirq > 0) {
2014 irq_dispose_mapping(dev->domain_data.phyirq);
2016 if (dev->domain_data.irqdomain)
2017 irq_domain_remove(dev->domain_data.irqdomain);
2019 dev->domain_data.phyirq = 0;
2020 dev->domain_data.irqdomain = NULL;
/* PHY fixup for an external LAN8835 on LAN7801: route the shared pin
 * to IRQ_N mode, enable the MAC-side RGMII TXC delay, tune the TX DLL,
 * and record the resulting RGMII TX-internal-delay interface mode.
 */
2023 static int lan8835_fixup(struct phy_device *phydev)
2027 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2029 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2030 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2033 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2035 /* RGMII MAC TXC Delay Enable */
2036 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2037 MAC_RGMII_ID_TXC_DELAY_EN_);
2039 /* RGMII TX DLL Tune Adjust */
2040 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2042 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
/* PHY fixup for an external Micrel KSZ9031RNX on LAN7801: program the
 * RGMII pad-skew registers (MMD "WIS" device 2 on this PHY) and record
 * RGMII RX-internal-delay as the interface mode.
 */
2047 static int ksz9031rnx_fixup(struct phy_device *phydev)
2049 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2051 /* Micrel9301RNX PHY configuration */
2052 /* RGMII Control Signal Pad Skew */
2053 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2054 /* RGMII RX Data Pad Skew */
2055 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2056 /* RGMII RX Clock Pad Skew */
2057 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2059 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* LAN7801-specific PHY setup. If no PHY is found on the MDIO bus, a
 * fixed 1G full-duplex PHY is registered and the MAC's RGMII delays
 * and 125MHz/25MHz clocks are configured directly. Otherwise the
 * external PHY is used, with fixups registered for the supported
 * KSZ9031RNX and LAN8835 parts. Returns the phy_device (error paths
 * partially elided in this view).
 */
2064 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2068 struct fixed_phy_status fphy_status = {
2070 .speed = SPEED_1000,
2071 .duplex = DUPLEX_FULL,
2073 struct phy_device *phydev;
2075 phydev = phy_find_first(dev->mdiobus);
2077 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2078 phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
2080 if (IS_ERR(phydev)) {
2081 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2084 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2085 dev->interface = PHY_INTERFACE_MODE_RGMII;
2086 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2087 MAC_RGMII_ID_TXC_DELAY_EN_);
2088 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2089 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2090 buf |= HW_CFG_CLK125_EN_;
2091 buf |= HW_CFG_REFCLK25_EN_;
2092 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2095 netdev_err(dev->net, "no PHY driver found\n");
2098 dev->interface = PHY_INTERFACE_MODE_RGMII;
2099 /* external PHY fixup for KSZ9031RNX */
2100 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2103 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2106 /* external PHY fixup for LAN8835 */
2107 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2110 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2113 /* add more external PHY fixup here if needed */
2115 phydev->is_internal = false;
/* Locate, configure and attach the PHY for the detected chip:
 * LAN7801 goes through lan7801_phy_init() (external/fixed PHY,
 * RGMII), LAN7800/7850 use the internal GMII PHY. Then hook the
 * phylib irq, enable auto-MDIX, connect the link-change callback,
 * strip 1000T-Half (unsupported by the MAC), advertise both pause
 * directions, honor any DT "microchip,led-modes" property for LED
 * enables, and kick off autonegotiation.
 */
2120 static int lan78xx_phy_init(struct lan78xx_net *dev)
2124 struct phy_device *phydev;
2126 switch (dev->chipid) {
2127 case ID_REV_CHIP_ID_7801_:
2128 phydev = lan7801_phy_init(dev);
2130 netdev_err(dev->net, "lan7801: PHY Init Failed");
2135 case ID_REV_CHIP_ID_7800_:
2136 case ID_REV_CHIP_ID_7850_:
2137 phydev = phy_find_first(dev->mdiobus);
2139 netdev_err(dev->net, "no PHY found\n");
2142 phydev->is_internal = true;
2143 dev->interface = PHY_INTERFACE_MODE_GMII;
2147 netdev_err(dev->net, "Unknown CHIP ID found\n");
2151 /* if phyirq is not set, use polling mode in phylib */
2152 if (dev->domain_data.phyirq > 0)
2153 phydev->irq = dev->domain_data.phyirq;
2156 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2158 /* set to AUTOMDIX */
2159 phydev->mdix = ETH_TP_MDI_AUTO;
2161 ret = phy_connect_direct(dev->net, phydev,
2162 lan78xx_link_status_change,
2165 netdev_err(dev->net, "can't attach PHY to %s\n",
/* attach failed: unwind LAN7801 fixed-PHY and fixup registrations */
2167 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2168 if (phy_is_pseudo_fixed_link(phydev)) {
2169 fixed_phy_unregister(phydev);
2171 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2173 phy_unregister_fixup_for_uid(PHY_LAN8835,
2180 /* MAC doesn't support 1000T Half */
2181 phydev->supported &= ~SUPPORTED_1000baseT_Half;
2183 /* support both flow controls */
2184 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2185 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2186 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2187 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2189 if (phydev->mdio.dev.of_node) {
2193 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2194 "microchip,led-modes",
2197 /* Ensure the appropriate LEDs are enabled */
/* NOTE(review): "®" below looks like mojibake for "&reg" — confirm
 * against the original source before relying on this line.
 */
2198 lan78xx_read_reg(dev, HW_CFG, ®);
2199 reg &= ~(HW_CFG_LED0_EN_ |
/* enable LEDn only when the DT property defines a mode for it */
2203 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2204 (len > 1) * HW_CFG_LED1_EN_ |
2205 (len > 2) * HW_CFG_LED2_EN_ |
2206 (len > 3) * HW_CFG_LED3_EN_;
2207 lan78xx_write_reg(dev, HW_CFG, reg);
2211 genphy_config_aneg(phydev);
2213 dev->fc_autoneg = phydev->autoneg;
/* Program the MAC's maximum RX frame length. The receiver is disabled
 * around the update if it was running, and re-enabled afterwards; +4
 * accounts for the FCS appended to each frame.
 */
2218 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2224 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2226 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2229 buf &= ~MAC_RX_RXEN_;
2230 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2233 /* add 4 to size for FCS */
2234 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2235 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2237 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2240 buf |= MAC_RX_RXEN_;
2241 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every in-flight URB on queue q. Each entry is
 * marked unlink_start so it is visited once; the queue lock is dropped
 * (with a URB reference held) around usb_unlink_urb() because it can
 * race with the completion handler. Returns the number of URBs
 * unlinked (count increment elided in this view).
 */
2247 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2249 struct sk_buff *skb;
2250 unsigned long flags;
2253 spin_lock_irqsave(&q->lock, flags);
2254 while (!skb_queue_empty(q)) {
2255 struct skb_data *entry;
/* find the first entry not already being unlinked */
2259 skb_queue_walk(q, skb) {
2260 entry = (struct skb_data *)skb->cb;
2261 if (entry->state != unlink_start)
2266 entry->state = unlink_start;
2269 /* Get reference count of the URB to avoid it to be
2270 * freed during usb_unlink_urb, which may trigger
2271 * use-after-free problem inside usb_unlink_urb since
2272 * usb_unlink_urb is always racing with .complete
2273 * handler(include defer_bh).
2276 spin_unlock_irqrestore(&q->lock, flags);
2277 /* during some PM-driven resume scenarios,
2278 * these (async) unlinks complete immediately
2280 ret = usb_unlink_urb(urb);
2281 if (ret != -EINPROGRESS && ret != 0)
2282 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2286 spin_lock_irqsave(&q->lock, flags);
2288 spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: reject MTUs whose link-layer size is an exact
 * multiple of the USB max packet (would require an extra zero-length
 * packet), program the new RX max frame length, and grow the RX URB
 * size to match — recycling in-flight RX URBs when the size increased.
 */
2292 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2294 struct lan78xx_net *dev = netdev_priv(netdev);
2295 int ll_mtu = new_mtu + netdev->hard_header_len;
2296 int old_hard_mtu = dev->hard_mtu;
2297 int old_rx_urb_size = dev->rx_urb_size;
2300 /* no second zero-length packet read wanted after mtu-sized packets */
2301 if ((ll_mtu % dev->maxpacket) == 0)
2304 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2306 netdev->mtu = new_mtu;
2308 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
/* only track hard_mtu if rx_urb_size wasn't set independently */
2309 if (dev->rx_urb_size == old_hard_mtu) {
2310 dev->rx_urb_size = dev->hard_mtu;
2311 if (dev->rx_urb_size > old_rx_urb_size) {
2312 if (netif_running(dev->net)) {
2313 unlink_urbs(dev, &dev->rxq);
2314 tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: validate the new address, copy it into the
 * netdev, and program both the RX_ADDR registers and perfect-filter
 * slot 0. Rejected while the interface is running (error return line
 * elided in this view).
 */
2322 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2324 struct lan78xx_net *dev = netdev_priv(netdev);
2325 struct sockaddr *addr = p;
2326 u32 addr_lo, addr_hi;
2329 if (netif_running(netdev))
2332 if (!is_valid_ether_addr(addr->sa_data))
2333 return -EADDRNOTAVAIL;
2335 ether_addr_copy(netdev->dev_addr, addr->sa_data);
/* pack the 6 bytes into the two little-endian address registers */
2337 addr_lo = netdev->dev_addr[0] |
2338 netdev->dev_addr[1] << 8 |
2339 netdev->dev_addr[2] << 16 |
2340 netdev->dev_addr[3] << 24;
2341 addr_hi = netdev->dev_addr[4] |
2342 netdev->dev_addr[5] << 8;
2344 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2345 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2347 /* Added to support MAC address changes */
2348 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2349 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2354 /* Enable or disable Rx checksum offload engine */
2355 static int lan78xx_set_features(struct net_device *netdev,
2356 netdev_features_t features)
2358 struct lan78xx_net *dev = netdev_priv(netdev);
2359 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2360 unsigned long flags;
2363 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2365 if (features & NETIF_F_RXCSUM) {
2366 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2367 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2369 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2370 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2373 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2374 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2376 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2378 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2379 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2381 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2383 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2385 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* Workqueue handler: flush the in-memory VLAN filter table to the
 * dataport. Runs in process context because the write sleeps.
 */
2390 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2392 struct lan78xx_priv *pdata =
2393 container_of(param, struct lan78xx_priv, set_vlan);
2394 struct lan78xx_net *dev = pdata->dev;
2396 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2397 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the VID's bit in the 32-bit-per-word VLAN
 * bitmap and defer the hardware write to the set_vlan work item
 * (register access sleeps; this callback may not).
 */
2400 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2401 __be16 proto, u16 vid)
2403 struct lan78xx_net *dev = netdev_priv(netdev);
2404 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2406 u16 vid_dword_index;
/* vid / 32 selects the table word, vid % 32 the bit within it */
2408 vid_dword_index = (vid >> 5) & 0x7F;
2409 vid_bit_index = vid & 0x1F;
2411 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2413 /* defer register writes to a sleepable context */
2414 schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the VID's bit in the VLAN bitmap and
 * defer the hardware write, mirroring lan78xx_vlan_rx_add_vid().
 */
2419 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2420 __be16 proto, u16 vid)
2422 struct lan78xx_net *dev = netdev_priv(netdev);
2423 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2425 u16 vid_dword_index;
2427 vid_dword_index = (vid >> 5) & 0x7F;
2428 vid_bit_index = vid & 0x1F;
2430 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2432 /* defer register writes to a sleepable context */
2433 schedule_work(&pdata->set_vlan);
/* Initialize USB Latency Tolerance Messaging (LTM) registers. When LTM
 * is enabled in USB_CFG1, a 24-byte parameter block is loaded from
 * EEPROM (or OTP as fallback) if one is present; otherwise the zeroed
 * defaults in regs[] are programmed.
 */
2438 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2442 u32 regs[6] = { 0 };
2444 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2445 if (buf & USB_CFG1_LTM_ENABLE_) {
2447 /* Get values from EEPROM first */
2448 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
/* temp[0] == 24: a full 6-dword LTM block is stored */
2449 if (temp[0] == 24) {
2450 ret = lan78xx_read_raw_eeprom(dev,
2457 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2458 if (temp[0] == 24) {
2459 ret = lan78xx_read_raw_otp(dev,
2469 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2470 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2471 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2472 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2473 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2474 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device bring-up: soft-reset the chip and configure MAC, FIFOs,
 * burst sizes, receive filters and the PHY reset. Sequence:
 * lite reset -> MAC address -> chip-ID read -> USB config -> LTM ->
 * speed-dependent burst/queue sizing -> FIFO sizes -> flow control
 * off -> RFE filters & offloads -> PHY reset -> MAC/FCT TX+RX enable.
 */
2477 static int lan78xx_reset(struct lan78xx_net *dev)
2479 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2482 unsigned long timeout;
2485 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2486 buf |= HW_CFG_LRST_;
2487 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* poll until the self-clearing lite-reset bit drops, max ~1s */
2489 timeout = jiffies + HZ;
2492 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2493 if (time_after(jiffies, timeout)) {
2494 netdev_warn(dev->net,
2495 "timeout on completion of LiteReset");
2498 } while (buf & HW_CFG_LRST_);
2500 lan78xx_init_mac_address(dev);
2502 /* save DEVID for later usage */
2503 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2504 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2505 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2507 /* Respond to the IN token with a NAK */
2508 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2509 buf |= USB_CFG_BIR_;
2510 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2513 lan78xx_init_ltm(dev);
/* burst cap and queue lengths depend on negotiated USB speed */
2515 if (dev->udev->speed == USB_SPEED_SUPER) {
2516 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2517 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2520 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2521 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2522 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2523 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2524 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2526 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2527 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2532 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2533 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2535 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2537 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* enable bulk-in concatenation (multiple frames per bulk transfer) */
2539 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2540 buf |= USB_CFG_BCE_;
2541 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2543 /* set FIFO sizes */
2544 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2545 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2547 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2548 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2550 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2551 ret = lan78xx_write_reg(dev, FLOW, 0);
2552 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2554 /* Don't need rfe_ctl_lock during initialisation */
2555 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2556 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2557 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2559 /* Enable or disable checksum offload engines */
2560 lan78xx_set_features(dev->net, dev->net->features);
2562 lan78xx_set_multicast(dev->net);
/* reset the PHY and wait for it to come ready, max ~1s */
2565 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2566 buf |= PMT_CTL_PHY_RST_;
2567 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2569 timeout = jiffies + HZ;
2572 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2573 if (time_after(jiffies, timeout)) {
2574 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2577 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2579 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2580 /* LAN7801 only has RGMII mode */
2581 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2582 buf &= ~MAC_CR_GMII_EN_;
2584 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2585 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2586 if (!ret && sig != EEPROM_INDICATOR) {
2587 /* Implies there is no external eeprom. Set mac speed */
2588 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2589 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2592 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2594 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2595 buf |= MAC_TX_TXEN_;
2596 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2598 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2599 buf |= FCT_TX_CTL_EN_;
2600 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2602 ret = lan78xx_set_rx_max_frame_length(dev,
2603 dev->net->mtu + VLAN_ETH_HLEN);
2605 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2606 buf |= MAC_RX_RXEN_;
2607 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2609 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2610 buf |= FCT_RX_CTL_EN_;
2611 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* Seed the statistics rollover thresholds: every counter starts at the
 * default filled in by the elided loop, then the byte-count and LPI
 * counters — which are full 32-bit in hardware — are raised to
 * 0xFFFFFFFF. Finally schedule a stats update.
 */
2616 static void lan78xx_init_stats(struct lan78xx_net *dev)
2621 /* initialize for stats update
2622 * some counters are 20bits and some are 32bits
/* walk rollover_max as an array of u32 (default value elided here) */
2624 p = (u32 *)&dev->stats.rollover_max;
2625 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2628 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2629 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2630 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2631 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2632 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2633 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2634 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2635 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2636 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2637 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2639 set_bit(EVENT_STAT_UPDATE, &dev->flags);
/* ndo_open: resume the device, start the PHY, submit the interrupt
 * URB used for link events, initialize statistics, mark the device
 * open and kick a deferred link reset.
 */
2642 static int lan78xx_open(struct net_device *net)
2644 struct lan78xx_net *dev = netdev_priv(net);
2647 ret = usb_autopm_get_interface(dev->intf);
2651 phy_start(net->phydev);
2653 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2655 /* for Link Check */
2656 if (dev->urb_intr) {
2657 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2659 netif_err(dev, ifup, dev->net,
2660 "intr submit %d\n", ret);
2665 lan78xx_init_stats(dev);
2667 set_bit(EVENT_DEV_OPEN, &dev->flags);
2669 netif_start_queue(net);
/* link state will be re-evaluated by the deferred LINK_RESET event */
2671 dev->link_on = false;
2673 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2675 usb_autopm_put_interface(dev->intf);
/* Unlink all TX/RX URBs and wait (via dev->wait, woken by the
 * completion path) for them to drain before the device is stopped.
 */
2681 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2683 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2684 DECLARE_WAITQUEUE(wait, current);
2687 /* ensure there are no more active urbs */
2688 add_wait_queue(&unlink_wakeup, &wait);
2689 set_current_state(TASK_UNINTERRUPTIBLE);
2690 dev->wait = &unlink_wakeup;
2691 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2693 /* maybe wait for deletions to finish. */
/* NOTE(review): the && chain stops waiting as soon as ANY of the
 * three queues empties, not when all do — matches the historic
 * usbnet pattern, but verify this is the intended semantics.
 */
2694 while (!skb_queue_empty(&dev->rxq) &&
2695 !skb_queue_empty(&dev->txq) &&
2696 !skb_queue_empty(&dev->done)) {
2697 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2698 set_current_state(TASK_UNINTERRUPTIBLE);
2699 netif_dbg(dev, ifdown, dev->net,
2700 "waited for %d urb completions\n", temp);
2702 set_current_state(TASK_RUNNING);
2704 remove_wait_queue(&unlink_wakeup, &wait);
/* ndo_stop: tear down in roughly the reverse order of lan78xx_open().
 * Stops the stat timer and PHY, drains URBs, kills the interrupt URB,
 * purges paused RX skbs, and quiesces deferred work before dropping the
 * runtime-PM reference held while the interface was up.
 */
2707 static int lan78xx_stop(struct net_device *net)
2709 struct lan78xx_net *dev = netdev_priv(net);
2711 if (timer_pending(&dev->stat_monitor))
2712 del_timer_sync(&dev->stat_monitor);
2715 phy_stop(net->phydev);
2717 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2718 netif_stop_queue(net);
2720 netif_info(dev, ifdown, dev->net,
2721 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2722 net->stats.rx_packets, net->stats.tx_packets,
2723 net->stats.rx_errors, net->stats.tx_errors);
2725 lan78xx_terminate_urbs(dev);
2727 usb_kill_urb(dev->urb_intr);
2729 skb_queue_purge(&dev->rxq_pause);
2731 /* deferred work (task, timer, softirq) must also stop.
2732 * can't flush_scheduled_work() until we drop rtnl (later),
2733 * else workers could deadlock; so make workers a NOP.
2736 cancel_delayed_work_sync(&dev->wq);
2737 tasklet_kill(&dev->bh);
2739 usb_autopm_put_interface(dev->intf);
/* Prepend the two 32-bit TX command words (TX_CMD_A/TX_CMD_B) to an
 * outgoing skb. Frees the skb and (per the callers' NULL check in
 * lan78xx_start_xmit) presumably returns NULL on failure of cow/linearize.
 * Command word A carries length/FCS/checksum/LSO flags; word B carries
 * MSS and VLAN tag. The push of headroom for the command words happens
 * on lines not visible in this excerpt.
 */
2744 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2745 struct sk_buff *skb, gfp_t flags)
2747 u32 tx_cmd_a, tx_cmd_b;
/* need TX_OVERHEAD bytes of private headroom for the command words */
2749 if (skb_cow_head(skb, TX_OVERHEAD)) {
2750 dev_kfree_skb_any(skb);
/* hardware DMA wants a single contiguous buffer */
2754 if (skb_linearize(skb)) {
2755 dev_kfree_skb_any(skb);
2759 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2761 if (skb->ip_summed == CHECKSUM_PARTIAL)
2762 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
/* NOTE(review): tx_cmd_b is presumably zeroed in a branch not visible
 * here; otherwise the VLAN '|=' below would read it uninitialized.
 */
2765 if (skb_is_gso(skb)) {
2766 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2768 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2770 tx_cmd_a |= TX_CMD_A_LSO_;
2773 if (skb_vlan_tag_present(skb)) {
2774 tx_cmd_a |= TX_CMD_A_IVTG_;
2775 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* command words go on the wire little-endian, B then A at the front */
2779 cpu_to_le32s(&tx_cmd_b);
2780 memcpy(skb->data, &tx_cmd_b, 4);
2783 cpu_to_le32s(&tx_cmd_a);
2784 memcpy(skb->data, &tx_cmd_a, 4);
/* Move an skb from @list to dev->done (updating its state tag) and kick
 * the bottom-half tasklet if the done queue transitioned from empty.
 * Returns the skb's previous state (return statement not visible here).
 *
 * Locking: irqs are disabled via the irqsave on list->lock and stay
 * disabled across the hand-off to done.lock; the saved flags from the
 * first lock are deliberately restored when releasing the second.
 */
2789 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2790 struct sk_buff_head *list, enum skb_state state)
2792 unsigned long flags;
2793 enum skb_state old_state;
2794 struct skb_data *entry = (struct skb_data *)skb->cb;
2796 spin_lock_irqsave(&list->lock, flags);
2797 old_state = entry->state;
2798 entry->state = state;
2800 __skb_unlink(skb, list);
2801 spin_unlock(&list->lock);
2802 spin_lock(&dev->done.lock);
2804 __skb_queue_tail(&dev->done, skb);
/* schedule the tasklet only on the empty->non-empty transition */
2805 if (skb_queue_len(&dev->done) == 1)
2806 tasklet_schedule(&dev->bh);
2807 spin_unlock_irqrestore(&dev->done.lock, flags);
/* URB completion callback for bulk-out transfers. Accounts stats,
 * classifies errors (halted endpoint -> defer EVENT_TX_HALT; shutdown
 * codes fall through silently), releases the async runtime-PM reference
 * taken at submit time, and defers the skb to the done queue for the
 * bottom half to free.
 */
2812 static void tx_complete(struct urb *urb)
2814 struct sk_buff *skb = (struct sk_buff *)urb->context;
2815 struct skb_data *entry = (struct skb_data *)skb->cb;
2816 struct lan78xx_net *dev = entry->dev;
2818 if (urb->status == 0) {
/* one URB may carry several batched packets; see lan78xx_tx_bh() */
2819 dev->net->stats.tx_packets += entry->num_of_packet;
2820 dev->net->stats.tx_bytes += entry->length;
2822 dev->net->stats.tx_errors++;
2824 switch (urb->status) {
2826 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2829 /* software-driven interface shutdown */
2837 netif_stop_queue(dev->net);
2840 netif_dbg(dev, tx_err, dev->net,
2841 "tx err %d\n", entry->urb->status);
/* balances usb_autopm_get_interface_async() in lan78xx_tx_bh() */
2846 usb_autopm_put_interface_async(dev->intf);
2848 defer_bh(dev, skb, &dev->txq, tx_done);
/* Append @newsk to @list and tag its per-skb state. Callers hold the
 * list lock (uses the unlocked __skb_queue_tail variant).
 */
2851 static void lan78xx_queue_skb(struct sk_buff_head *list,
2852 struct sk_buff *newsk, enum skb_state state)
2854 struct skb_data *entry = (struct skb_data *)newsk->cb;
2856 __skb_queue_tail(list, newsk);
2857 entry->state = state;
/* ndo_start_xmit: prepend the TX command words via lan78xx_tx_prep() and
 * park the skb on txq_pend; the bottom half (lan78xx_tx_bh) batches and
 * submits the actual bulk URB. Always returns NETDEV_TX_OK — on prep
 * failure the packet is counted as error/dropped, not requeued.
 */
2861 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2863 struct lan78xx_net *dev = netdev_priv(net);
2864 struct sk_buff *skb2 = NULL;
2867 skb_tx_timestamp(skb);
2868 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2872 skb_queue_tail(&dev->txq_pend, skb2);
2874 /* throttle TX patch at slower than SUPER SPEED USB */
2875 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2876 (skb_queue_len(&dev->txq_pend) > 10))
2877 netif_stop_queue(net);
/* lan78xx_tx_prep() returned NULL: skb already freed there */
2879 netif_dbg(dev, tx_err, dev->net,
2880 "lan78xx_tx_prep return NULL\n");
2881 dev->net->stats.tx_errors++;
2882 dev->net->stats.tx_dropped++;
2885 tasklet_schedule(&dev->bh);
2887 return NETDEV_TX_OK;
/* One-time device setup during probe: allocate the private data hung off
 * dev->data[0], init its locks/work items, declare netdev feature flags,
 * set up the IRQ domain, reset and init registers, and bring up the MDIO
 * bus. The error-unwind labels at the bottom release the IRQ domain and
 * cancel the deferred works.
 */
2890 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2892 struct lan78xx_priv *pdata = NULL;
/* pdata is stashed as an unsigned long in dev->data[0] */
2896 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2898 pdata = (struct lan78xx_priv *)(dev->data[0]);
2900 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2906 spin_lock_init(&pdata->rfe_ctl_lock);
2907 mutex_init(&pdata->dataport_mutex);
2909 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2911 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2912 pdata->vlan_table[i] = 0;
2914 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
/* build the feature set from the compile-time DEFAULT_* policy knobs */
2916 dev->net->features = 0;
2918 if (DEFAULT_TX_CSUM_ENABLE)
2919 dev->net->features |= NETIF_F_HW_CSUM;
2921 if (DEFAULT_RX_CSUM_ENABLE)
2922 dev->net->features |= NETIF_F_RXCSUM;
2924 if (DEFAULT_TSO_CSUM_ENABLE)
2925 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2927 if (DEFAULT_VLAN_RX_OFFLOAD)
2928 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2930 if (DEFAULT_VLAN_FILTER_ENABLE)
2931 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2933 dev->net->hw_features = dev->net->features;
2935 ret = lan78xx_setup_irq_domain(dev);
2937 netdev_warn(dev->net,
2938 "lan78xx_setup_irq_domain() failed : %d", ret);
/* account for the TX command words prepended by lan78xx_tx_prep() */
2942 dev->net->hard_header_len += TX_OVERHEAD;
2943 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2945 /* Init all registers */
2946 ret = lan78xx_reset(dev);
2948 netdev_warn(dev->net, "Registers INIT FAILED....");
2952 ret = lan78xx_mdio_init(dev);
2954 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2958 dev->net->flags |= IFF_MULTICAST;
/* default Wake-on-LAN policy: magic packet */
2960 pdata->wol = WAKE_MAGIC;
/* error unwind (labels not visible in this excerpt) */
2965 lan78xx_remove_irq_domain(dev);
2968 netdev_warn(dev->net, "Bind routine FAILED");
2969 cancel_work_sync(&pdata->set_multicast);
2970 cancel_work_sync(&pdata->set_vlan);
/* Inverse of lan78xx_bind(): release the IRQ domain and MDIO bus and
 * cancel the deferred multicast/VLAN writes. (The kfree of pdata is on
 * lines not visible in this excerpt.)
 */
2975 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2977 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2979 lan78xx_remove_irq_domain(dev);
2981 lan78xx_remove_mdio(dev);
2984 cancel_work_sync(&pdata->set_multicast);
2985 cancel_work_sync(&pdata->set_vlan);
2986 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Apply the hardware RX checksum (upper 16 bits of rx_cmd_b) to the skb,
 * or fall back to CHECKSUM_NONE when offload is disabled, the hardware
 * flagged the checksum as invalid (ICSM), or a VLAN tag is present but
 * not being stripped (see comment below).
 */
2993 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2994 struct sk_buff *skb,
2995 u32 rx_cmd_a, u32 rx_cmd_b)
2997 /* HW Checksum offload appears to be flawed if used when not stripping
2998 * VLAN headers. Drop back to S/W checksums under these conditions.
3000 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3001 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3002 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3003 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3004 skb->ip_summed = CHECKSUM_NONE;
3006 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3007 skb->ip_summed = CHECKSUM_COMPLETE;
/* If VLAN RX offload is on and the hardware flagged a tag (FVTG), attach
 * the 802.1Q tag carried in the low 16 bits of rx_cmd_b to the skb.
 */
3011 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3012 struct sk_buff *skb,
3013 u32 rx_cmd_a, u32 rx_cmd_b)
3015 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3016 (rx_cmd_a & RX_CMD_A_FVTG_))
3017 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3018 (rx_cmd_b & 0xffff));
/* Hand a fully-parsed RX skb to the network stack, or park it on
 * rxq_pause while RX is paused. Updates RX stats, sets the protocol,
 * clears the driver's skb->cb scratch, and honors RX hardware
 * timestamping deferral before netif_rx().
 */
3021 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3025 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3026 skb_queue_tail(&dev->rxq_pause, skb);
3030 dev->net->stats.rx_packets++;
3031 dev->net->stats.rx_bytes += skb->len;
3033 skb->protocol = eth_type_trans(skb, dev->net);
3035 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3036 skb->len + sizeof(struct ethhdr), skb->protocol);
/* skb->cb held our struct skb_data; scrub it before the stack sees it */
3037 memset(skb->cb, 0, sizeof(struct skb_data));
3039 if (skb_defer_rx_timestamp(skb))
3042 status = netif_rx(skb);
3043 if (status != NET_RX_SUCCESS)
3044 netif_dbg(dev, rx_err, dev->net,
3045 "netif_rx status %d\n", status);
/* Parse one bulk-in buffer that may contain several frames. Each frame
 * is preceded by three little-endian command words (rx_cmd_a/b/c); frames
 * are padded to 4-byte alignment (RXW_PADDING). The last frame reuses the
 * URB skb directly; earlier frames are cloned. Returns nonzero on success
 * per the caller in rx_process() (return statements not visible here).
 */
3048 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3050 if (skb->len < dev->net->hard_header_len)
3053 while (skb->len > 0) {
3054 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3056 struct sk_buff *skb2;
3057 unsigned char *packet;
/* pull the three command words off the front of the buffer */
3059 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
3060 le32_to_cpus(&rx_cmd_a);
3061 skb_pull(skb, sizeof(rx_cmd_a));
3063 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
3064 le32_to_cpus(&rx_cmd_b);
3065 skb_pull(skb, sizeof(rx_cmd_b));
3067 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
3068 le16_to_cpus(&rx_cmd_c);
3069 skb_pull(skb, sizeof(rx_cmd_c));
3073 /* get the packet length */
3074 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3075 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
/* RED = receive error detected; error frames are logged and skipped */
3077 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3078 netif_dbg(dev, rx_err, dev->net,
3079 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3081 /* last frame in this batch */
3082 if (skb->len == size) {
3083 lan78xx_rx_csum_offload(dev, skb,
3084 rx_cmd_a, rx_cmd_b);
3085 lan78xx_rx_vlan_offload(dev, skb,
3086 rx_cmd_a, rx_cmd_b);
3088 skb_trim(skb, skb->len - 4); /* remove fcs */
3089 skb->truesize = size + sizeof(struct sk_buff);
/* not the last frame: clone and point the clone at this frame's data */
3094 skb2 = skb_clone(skb, GFP_ATOMIC);
3095 if (unlikely(!skb2)) {
3096 netdev_warn(dev->net, "Error allocating skb");
3101 skb2->data = packet;
3102 skb_set_tail_pointer(skb2, size);
3104 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3105 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3107 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3108 skb2->truesize = size + sizeof(struct sk_buff);
3110 lan78xx_skb_return(dev, skb2);
3113 skb_pull(skb, size);
3115 /* padding bytes before the next frame starts */
3117 skb_pull(skb, align_count);
/* Dispatch one completed RX buffer: on parse failure count an error
 * (and, per the trailing lines, requeue to dev->done for cleanup);
 * otherwise hand it up the stack. Control flow between the visible
 * branches is partly on lines not shown in this excerpt.
 */
3123 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3125 if (!lan78xx_rx(dev, skb)) {
3126 dev->net->stats.rx_errors++;
3131 lan78xx_skb_return(dev, skb);
3135 netif_dbg(dev, rx_err, dev->net, "drop\n");
3136 dev->net->stats.rx_errors++;
3138 skb_queue_tail(&dev->done, skb);
3141 static void rx_complete(struct urb *urb);
/* Allocate a fresh RX skb, bind it to @urb, and submit on the bulk-in
 * pipe — but only while the netdev is present, running, and neither
 * halted nor asleep. Error codes steer recovery: -EPIPE defers an
 * EVENT_RX_HALT clear, -ENODEV detaches the device, other failures
 * reschedule the tasklet. The skb is freed on any non-submitted path.
 */
3143 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3145 struct sk_buff *skb;
3146 struct skb_data *entry;
3147 unsigned long lockflags;
3148 size_t size = dev->rx_urb_size;
3151 skb = netdev_alloc_skb_ip_align(dev->net, size);
3157 entry = (struct skb_data *)skb->cb;
3162 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3163 skb->data, size, rx_complete, skb);
3165 spin_lock_irqsave(&dev->rxq.lock, lockflags);
/* only submit while up, attached, and not halted/suspended */
3167 if (netif_device_present(dev->net) &&
3168 netif_running(dev->net) &&
3169 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3170 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3171 ret = usb_submit_urb(urb, GFP_ATOMIC);
3174 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3177 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3180 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3181 netif_device_detach(dev->net);
3187 netif_dbg(dev, rx_err, dev->net,
3188 "rx submit, %d\n", ret);
3189 tasklet_schedule(&dev->bh);
3192 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3195 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3197 dev_kfree_skb_any(skb);
3203 static void rx_complete(struct urb *urb)
3205 struct sk_buff *skb = (struct sk_buff *)urb->context;
3206 struct skb_data *entry = (struct skb_data *)skb->cb;
3207 struct lan78xx_net *dev = entry->dev;
3208 int urb_status = urb->status;
3209 enum skb_state state;
3211 skb_put(skb, urb->actual_length);
3215 switch (urb_status) {
3217 if (skb->len < dev->net->hard_header_len) {
3219 dev->net->stats.rx_errors++;
3220 dev->net->stats.rx_length_errors++;
3221 netif_dbg(dev, rx_err, dev->net,
3222 "rx length %d\n", skb->len);
3224 usb_mark_last_busy(dev->udev);
3227 dev->net->stats.rx_errors++;
3228 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3230 case -ECONNRESET: /* async unlink */
3231 case -ESHUTDOWN: /* hardware gone */
3232 netif_dbg(dev, ifdown, dev->net,
3233 "rx shutdown, code %d\n", urb_status);
3241 dev->net->stats.rx_errors++;
3247 /* data overrun ... flush fifo? */
3249 dev->net->stats.rx_over_errors++;
3254 dev->net->stats.rx_errors++;
3255 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3259 state = defer_bh(dev, skb, &dev->rxq, state);
3262 if (netif_running(dev->net) &&
3263 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3264 state != unlink_start) {
3265 rx_submit(dev, urb, GFP_ATOMIC);
3270 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: coalesce pending skbs from txq_pend into one bulk-out
 * URB. A GSO skb is sent alone; otherwise packets are batched until
 * MAX_SINGLE_PACKET_SIZE, each aligned to 4 bytes in the merged buffer.
 * Handles runtime-PM (async get/put), deferral while the device is
 * asleep, ZLP for maxpacket-multiple lengths, and the usual halt/error
 * bookkeeping on submit failure.
 */
3273 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3276 struct urb *urb = NULL;
3277 struct skb_data *entry;
3278 unsigned long flags;
3279 struct sk_buff_head *tqp = &dev->txq_pend;
3280 struct sk_buff *skb, *skb2;
3283 int skb_totallen, pkt_cnt;
/* pass 1 (under the queue lock): measure how many packets fit */
3289 spin_lock_irqsave(&tqp->lock, flags);
3290 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3291 if (skb_is_gso(skb)) {
3293 /* handle previous packets first */
/* a GSO skb is transmitted on its own, unlinked directly here */
3297 length = skb->len - TX_OVERHEAD;
3298 __skb_unlink(skb, tqp);
3299 spin_unlock_irqrestore(&tqp->lock, flags);
3303 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3305 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3308 spin_unlock_irqrestore(&tqp->lock, flags);
3310 /* copy to a single skb */
3311 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3315 skb_put(skb, skb_totallen);
/* pass 2: dequeue and pack each pending skb at a 4-byte boundary */
3317 for (count = pos = 0; count < pkt_cnt; count++) {
3318 skb2 = skb_dequeue(tqp);
3320 length += (skb2->len - TX_OVERHEAD);
3321 memcpy(skb->data + pos, skb2->data, skb2->len);
3322 pos += roundup(skb2->len, sizeof(u32));
3323 dev_kfree_skb(skb2);
3328 urb = usb_alloc_urb(0, GFP_ATOMIC);
3332 entry = (struct skb_data *)skb->cb;
3335 entry->length = length;
3336 entry->num_of_packet = count;
3338 spin_lock_irqsave(&dev->txq.lock, flags);
3339 ret = usb_autopm_get_interface_async(dev->intf);
3341 spin_unlock_irqrestore(&dev->txq.lock, flags);
3345 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3346 skb->data, skb->len, tx_complete, skb);
3348 if (length % dev->maxpacket == 0) {
3349 /* send USB_ZERO_PACKET */
3350 urb->transfer_flags |= URB_ZERO_PACKET;
3354 /* if this triggers the device is still a sleep */
3355 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3356 /* transmission will be done in resume */
3357 usb_anchor_urb(urb, &dev->deferred);
3358 /* no use to process more packets */
3359 netif_stop_queue(dev->net);
3361 spin_unlock_irqrestore(&dev->txq.lock, flags);
3362 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3367 ret = usb_submit_urb(urb, GFP_ATOMIC);
3370 netif_trans_update(dev->net);
3371 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3372 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3373 netif_stop_queue(dev->net);
/* submit failed: -EPIPE defers a halt clear; others just log */
3376 netif_stop_queue(dev->net);
3377 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3378 usb_autopm_put_interface_async(dev->intf);
3381 usb_autopm_put_interface_async(dev->intf);
3382 netif_dbg(dev, tx_err, dev->net,
3383 "tx: submit urb err %d\n", ret);
3387 spin_unlock_irqrestore(&dev->txq.lock, flags);
3390 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3392 dev->net->stats.tx_dropped++;
3394 dev_kfree_skb_any(skb);
3397 netif_dbg(dev, tx_queued, dev->net,
3398 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the RX URB pool (at most 10 new submissions per
 * invocation) until rx_qlen URBs are in flight, rescheduling itself if
 * still short; wakes the TX queue when txq has room again.
 */
3401 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3406 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3407 for (i = 0; i < 10; i++) {
3408 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3410 urb = usb_alloc_urb(0, GFP_ATOMIC);
/* -ENOLINK from rx_submit means the device went away; stop early */
3412 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3416 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3417 tasklet_schedule(&dev->bh);
3419 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3420 netif_wake_queue(dev->net);
/* Main tasklet: drain dev->done — completed RX buffers are parsed via
 * rx_process(), finished RX/TX entries have their URBs freed — then, if
 * the device is still up, re-arm the stat timer delta, run the TX bottom
 * half when packets are pending, and the RX bottom half when allowed.
 */
3423 static void lan78xx_bh(unsigned long param)
3425 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3426 struct sk_buff *skb;
3427 struct skb_data *entry;
3429 while ((skb = skb_dequeue(&dev->done))) {
3430 entry = (struct skb_data *)(skb->cb);
3431 switch (entry->state) {
3433 entry->state = rx_cleanup;
3434 rx_process(dev, skb);
3437 usb_free_urb(entry->urb);
3441 usb_free_urb(entry->urb);
3445 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3450 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3451 /* reset update timer delta */
3452 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3454 mod_timer(&dev->stat_monitor,
3455 jiffies + STAT_UPDATE_TIMER);
3458 if (!skb_queue_empty(&dev->txq_pend))
3461 if (!timer_pending(&dev->delay) &&
3462 !test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred-event worker (dev->wq): services the EVENT_* bits set by
 * lan78xx_defer_kevent(). Clears TX/RX endpoint halts (with runtime-PM
 * held across the usb_clear_halt), performs deferred link resets, and
 * runs the periodic stats update with exponential back-off of the timer
 * delta (capped at 50).
 */
3467 static void lan78xx_delayedwork(struct work_struct *work)
3470 struct lan78xx_net *dev;
3472 dev = container_of(work, struct lan78xx_net, wq.work);
3474 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3475 unlink_urbs(dev, &dev->txq);
3476 status = usb_autopm_get_interface(dev->intf);
3479 status = usb_clear_halt(dev->udev, dev->pipe_out);
3480 usb_autopm_put_interface(dev->intf);
3483 status != -ESHUTDOWN) {
3484 if (netif_msg_tx_err(dev))
3486 netdev_err(dev->net,
3487 "can't clear tx halt, status %d\n",
3490 clear_bit(EVENT_TX_HALT, &dev->flags);
3491 if (status != -ESHUTDOWN)
3492 netif_wake_queue(dev->net);
3495 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3496 unlink_urbs(dev, &dev->rxq);
3497 status = usb_autopm_get_interface(dev->intf);
3500 status = usb_clear_halt(dev->udev, dev->pipe_in);
3501 usb_autopm_put_interface(dev->intf);
3504 status != -ESHUTDOWN) {
3505 if (netif_msg_rx_err(dev))
3507 netdev_err(dev->net,
3508 "can't clear rx halt, status %d\n",
3511 clear_bit(EVENT_RX_HALT, &dev->flags);
3512 tasklet_schedule(&dev->bh);
3516 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3519 clear_bit(EVENT_LINK_RESET, &dev->flags);
3520 status = usb_autopm_get_interface(dev->intf);
3523 if (lan78xx_link_reset(dev) < 0) {
3524 usb_autopm_put_interface(dev->intf);
3526 netdev_info(dev->net, "link reset failed (%d)\n",
3529 usb_autopm_put_interface(dev->intf);
3533 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3534 lan78xx_update_stats(dev);
3536 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3538 mod_timer(&dev->stat_monitor,
3539 jiffies + (STAT_UPDATE_TIMER * dev->delta));
/* back off the stats polling interval exponentially, capped at 50x */
3541 dev->delta = min((dev->delta * 2), 50);
/* Completion callback for the interrupt-in URB carrying link/status
 * events. On success, hands the payload to lan78xx_status(); shutdown
 * codes end the polling; other errors are merely logged (no throttling —
 * this endpoint polls infrequently). Resubmits the URB with a zeroed
 * buffer while the netdev is running.
 */
3545 static void intr_complete(struct urb *urb)
3547 struct lan78xx_net *dev = urb->context;
3548 int status = urb->status;
3553 lan78xx_status(dev, urb);
3556 /* software-driven interface shutdown */
3557 case -ENOENT: /* urb killed */
3558 case -ESHUTDOWN: /* hardware gone */
3559 netif_dbg(dev, ifdown, dev->net,
3560 "intr shutdown, code %d\n", status);
3563 /* NOTE: not throttling like RX/TX, since this endpoint
3564 * already polls infrequently
3567 netdev_dbg(dev->net, "intr status %d\n", status);
3571 if (!netif_running(dev->net))
3574 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3575 status = usb_submit_urb(urb, GFP_ATOMIC);
3577 netif_err(dev, timer, dev->net,
3578 "intr resubmit --> %d\n", status);
/* USB disconnect handler: detach intfdata, unregister the PHY fixups
 * registered at probe time, disconnect (and, for a fixed link, also
 * unregister) the PHY, unregister the netdev, then stop deferred work
 * and release URBs/buffers via lan78xx_unbind().
 */
3581 static void lan78xx_disconnect(struct usb_interface *intf)
3583 struct lan78xx_net *dev;
3584 struct usb_device *udev;
3585 struct net_device *net;
3586 struct phy_device *phydev;
3588 dev = usb_get_intfdata(intf);
3589 usb_set_intfdata(intf, NULL);
3593 udev = interface_to_usbdev(intf);
3595 phydev = net->phydev;
3597 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3598 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3600 phy_disconnect(net->phydev);
/* a fixed-link pseudo PHY was registered by the driver; release it */
3602 if (phy_is_pseudo_fixed_link(phydev))
3603 fixed_phy_unregister(phydev);
3605 unregister_netdev(net);
3607 cancel_delayed_work_sync(&dev->wq);
3609 usb_scuttle_anchored_urbs(&dev->deferred);
3611 lan78xx_unbind(dev, intf);
3613 usb_kill_urb(dev->urb_intr);
3614 usb_free_urb(dev->urb_intr);
/* ndo_tx_timeout: unlink stuck TX URBs and let the tasklet restart
 * transmission.
 */
3620 static void lan78xx_tx_timeout(struct net_device *net)
3622 struct lan78xx_net *dev = netdev_priv(net);
3624 unlink_urbs(dev, &dev->txq);
3625 tasklet_schedule(&dev->bh);
/* ndo_features_check: disable GSO for skbs that (with the TX command
 * overhead) would exceed the device's single-packet limit, then apply
 * the generic VLAN and VXLAN feature restrictions.
 */
3628 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3629 struct net_device *netdev,
3630 netdev_features_t features)
3632 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3633 features &= ~NETIF_F_GSO_MASK;
3635 features = vlan_features_check(skb, features);
3636 features = vxlan_features_check(skb, features);
/* net_device_ops vtable wiring the ndo_* callbacks defined above and
 * elsewhere in this file.
 */
3641 static const struct net_device_ops lan78xx_netdev_ops = {
3642 .ndo_open = lan78xx_open,
3643 .ndo_stop = lan78xx_stop,
3644 .ndo_start_xmit = lan78xx_start_xmit,
3645 .ndo_tx_timeout = lan78xx_tx_timeout,
3646 .ndo_change_mtu = lan78xx_change_mtu,
3647 .ndo_set_mac_address = lan78xx_set_mac_addr,
3648 .ndo_validate_addr = eth_validate_addr,
3649 .ndo_do_ioctl = lan78xx_ioctl,
3650 .ndo_set_rx_mode = lan78xx_set_multicast,
3651 .ndo_set_features = lan78xx_set_features,
3652 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3653 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3654 .ndo_features_check = lan78xx_features_check,
/* Periodic timer callback: defer the actual (USB I/O heavy) stats read
 * to the kevent worker — timers cannot sleep.
 */
3657 static void lan78xx_stat_monitor(struct timer_list *t)
3659 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3661 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* USB probe: allocate the etherdev and lan78xx_net state, init queues,
 * tasklet, worker, timers and locks, validate the three expected
 * endpoints (bulk-in, bulk-out, interrupt-in), bind/reset the hardware,
 * set MTU/GSO limits, allocate the interrupt URB, init the PHY, and
 * register the netdev. Error unwind labels are partially outside this
 * excerpt.
 */
3664 static int lan78xx_probe(struct usb_interface *intf,
3665 const struct usb_device_id *id)
3667 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3668 struct lan78xx_net *dev;
3669 struct net_device *netdev;
3670 struct usb_device *udev;
3676 udev = interface_to_usbdev(intf);
/* take a reference on the usb_device for the lifetime of the netdev */
3677 udev = usb_get_dev(udev);
3679 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3681 dev_err(&intf->dev, "Error: OOM\n");
3686 /* netdev_printk() needs this */
3687 SET_NETDEV_DEV(netdev, &intf->dev);
3689 dev = netdev_priv(netdev);
3693 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3694 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3696 skb_queue_head_init(&dev->rxq);
3697 skb_queue_head_init(&dev->txq);
3698 skb_queue_head_init(&dev->done);
3699 skb_queue_head_init(&dev->rxq_pause);
3700 skb_queue_head_init(&dev->txq_pend);
3701 mutex_init(&dev->phy_mutex);
3703 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3704 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3705 init_usb_anchor(&dev->deferred);
3707 netdev->netdev_ops = &lan78xx_netdev_ops;
3708 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3709 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3712 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3714 mutex_init(&dev->stats.access_lock);
/* validate the expected endpoint layout before touching the pipes */
3716 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3721 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3722 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3723 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3728 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3729 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3730 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3735 ep_intr = &intf->cur_altsetting->endpoint[2];
3736 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3741 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3742 usb_endpoint_num(&ep_intr->desc));
3744 ret = lan78xx_bind(dev, intf);
3747 strcpy(netdev->name, "eth%d");
3749 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3750 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3752 /* MTU range: 68 - 9000 */
3753 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3754 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3756 period = ep_intr->desc.bInterval;
3757 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3758 buf = kmalloc(maxp, GFP_KERNEL);
3760 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3761 if (!dev->urb_intr) {
3766 usb_fill_int_urb(dev->urb_intr, dev->udev,
3767 dev->pipe_intr, buf, maxp,
3768 intr_complete, dev, period);
/* buf ownership passes to the URB; freed with it */
3769 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3773 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3775 /* driver requires remote-wakeup capability during autosuspend. */
3776 intf->needs_remote_wakeup = 1;
3778 ret = lan78xx_phy_init(dev);
3782 ret = register_netdev(netdev);
3784 netif_err(dev, probe, netdev, "couldn't register the device\n");
3788 usb_set_intfdata(intf, dev);
3790 ret = device_set_wakeup_enable(&udev->dev, true);
3792 /* Default delay of 2sec has more overhead than advantage.
3793 * Set to 10sec as default.
3795 pm_runtime_set_autosuspend_delay(&udev->dev,
3796 DEFAULT_AUTOSUSPEND_DELAY);
/* error unwind (labels not visible in this excerpt) */
3801 phy_disconnect(netdev->phydev);
3803 usb_free_urb(dev->urb_intr);
3805 lan78xx_unbind(dev, intf);
3807 free_netdev(netdev);
/* Bit-serial CRC-16 (polynomial 0x8005) over @buf, as required for the
 * hardware wake-frame filter CRC fields in WUF_CFG. Several interior
 * lines (byte load, msb extraction, shift, final return) are not visible
 * in this excerpt.
 */
3814 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3816 const u16 crc16poly = 0x8005;
3822 for (i = 0; i < len; i++) {
3824 for (bit = 0; bit < 8; bit++) {
3828 if (msb ^ (u16)(data & 1)) {
3830 crc |= (u16)0x0001U;
/* Program the chip for suspend with the Wake-on-LAN options in @wol
 * (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP). Disables TX/RX, clears stale
 * wake status, then builds WUCSR wake enables and PMT_CTL suspend-mode
 * bits per option; MCAST and ARP additionally install CRC16-matched
 * wake-frame filters (IPv4/IPv6 multicast prefixes, ARP ethertype at
 * offset 12). Finishes by clearing wake status and re-enabling RX so the
 * wake logic can see traffic.
 */
3839 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3847 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3848 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3849 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce the MAC before reprogramming wake logic */
3851 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3852 buf &= ~MAC_TX_TXEN_;
3853 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3854 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3855 buf &= ~MAC_RX_RXEN_;
3856 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3858 ret = lan78xx_write_reg(dev, WUCSR, 0);
3859 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3860 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3865 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3866 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3867 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* start with all wake-frame filter slots disabled */
3869 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3870 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3873 if (wol & WAKE_PHY) {
3874 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3876 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3877 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3878 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3880 if (wol & WAKE_MAGIC) {
3881 temp_wucsr |= WUCSR_MPEN_;
3883 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3884 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
/* magic packet can use the deeper suspend mode 3 */
3885 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3887 if (wol & WAKE_BCAST) {
3888 temp_wucsr |= WUCSR_BCST_EN_;
3890 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3891 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3892 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3894 if (wol & WAKE_MCAST) {
3895 temp_wucsr |= WUCSR_WAKE_EN_;
3897 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3898 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3899 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3901 WUF_CFGX_TYPE_MCAST_ |
3902 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3903 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x7 = match the first 3 bytes of the DA */
3905 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3906 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3907 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3908 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3911 /* for IPv6 Multicast */
3912 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3913 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3915 WUF_CFGX_TYPE_MCAST_ |
3916 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3917 (crc & WUF_CFGX_CRC16_MASK_));
3919 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3920 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3921 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3922 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3925 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3926 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3927 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3929 if (wol & WAKE_UCAST) {
3930 temp_wucsr |= WUCSR_PFDA_EN_;
3932 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3933 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3934 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3936 if (wol & WAKE_ARP) {
3937 temp_wucsr |= WUCSR_WAKE_EN_;
3939 /* set WUF_CFG & WUF_MASK
3940 * for packettype (offset 12,13) = ARP (0x0806)
3942 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3943 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3945 WUF_CFGX_TYPE_ALL_ |
3946 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3947 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3000 = match bytes 12-13 (the ethertype field) */
3949 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3950 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3951 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3952 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3955 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3956 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3957 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3960 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3962 /* when multiple WOL bits are set */
/* several wake sources force the shallower suspend mode 0 */
3963 if (hweight_long((unsigned long)wol) > 1) {
3964 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3965 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3966 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3968 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear any latched wake-up status bits (write-1-to-clear) */
3971 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3972 buf |= PMT_CTL_WUPS_MASK_;
3973 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* RX must stay on so the wake logic can observe incoming frames */
3975 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3976 buf |= MAC_RX_RXEN_;
3977 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* USB suspend handler. On the first (outermost) suspend: refuse
 * autosuspend while TX is in flight, mark EVENT_DEV_ASLEEP, stop the
 * MAC, and drain URBs. Then program the wake configuration: for
 * autosuspend, wake on any good frame via PHY link events (SUS_MODE_3);
 * for system suspend with WoL, delegate to lan78xx_set_suspend() with
 * the user-configured pdata->wol options.
 */
3984 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3985 struct lan78xx_net *dev = usb_get_intfdata(intf);
3986 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3990 event = message.event;
3992 if (!dev->suspend_count++) {
3993 spin_lock_irq(&dev->txq.lock);
3994 /* don't autosuspend while transmitting */
3995 if ((skb_queue_len(&dev->txq) ||
3996 skb_queue_len(&dev->txq_pend)) &&
3997 PMSG_IS_AUTO(message)) {
3998 spin_unlock_irq(&dev->txq.lock);
4002 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4003 spin_unlock_irq(&dev->txq.lock);
/* stop TX and RX at the MAC before tearing down the URBs */
4007 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4008 buf &= ~MAC_TX_TXEN_;
4009 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4010 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4011 buf &= ~MAC_RX_RXEN_;
4012 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4014 /* empty out the rx and queues */
4015 netif_device_detach(dev->net);
4016 lan78xx_terminate_urbs(dev);
4017 usb_kill_urb(dev->urb_intr);
4020 netif_device_attach(dev->net);
4023 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4024 del_timer(&dev->stat_monitor);
4026 if (PMSG_IS_AUTO(message)) {
4027 /* auto suspend (selective suspend) */
4028 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4029 buf &= ~MAC_TX_TXEN_;
4030 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4031 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4032 buf &= ~MAC_RX_RXEN_;
4033 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* reset wake state, then arm good-frame + store wake */
4035 ret = lan78xx_write_reg(dev, WUCSR, 0);
4036 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4037 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4039 /* set goodframe wakeup */
4040 ret = lan78xx_read_reg(dev, WUCSR, &buf);
4042 buf |= WUCSR_RFE_WAKE_EN_;
4043 buf |= WUCSR_STORE_WAKE_;
4045 ret = lan78xx_write_reg(dev, WUCSR, buf);
4047 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4049 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4050 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4052 buf |= PMT_CTL_PHY_WAKE_EN_;
4053 buf |= PMT_CTL_WOL_EN_;
4054 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4055 buf |= PMT_CTL_SUS_MODE_3_;
4057 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* clear latched wake-up status (write-1-to-clear) */
4059 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4061 buf |= PMT_CTL_WUPS_MASK_;
4063 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* leave the receiver on so wake frames are seen */
4065 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4066 buf |= MAC_RX_RXEN_;
4067 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* system sleep: apply the user-selected Wake-on-LAN options */
4069 lan78xx_set_suspend(dev, pdata->wol);
/* USB resume handler: re-arm the stat timer, and on the last nested
 * resume resubmit the interrupt URB, flush TX URBs that were anchored on
 * dev->deferred while asleep, clear EVENT_DEV_ASLEEP, restart the queue/
 * tasklet, then disarm all wake sources (WUCSR/WUCSR2/WK_SRC) and
 * re-enable the MAC transmitter.
 */
4080 static int lan78xx_resume(struct usb_interface *intf)
4081 struct lan78xx_net *dev = usb_get_intfdata(intf);
4082 struct sk_buff *skb;
4086 if (!timer_pending(&dev->stat_monitor)) {
4088 mod_timer(&dev->stat_monitor,
4089 jiffies + STAT_UPDATE_TIMER);
4092 if (!--dev->suspend_count) {
4093 /* resume interrupt URBs */
4094 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4095 usb_submit_urb(dev->urb_intr, GFP_NOIO);
/* submit the TX URBs deferred by lan78xx_tx_bh() during sleep */
4097 spin_lock_irq(&dev->txq.lock);
4098 while ((res = usb_get_from_anchor(&dev->deferred))) {
4099 skb = (struct sk_buff *)res->context;
4100 ret = usb_submit_urb(res, GFP_ATOMIC);
/* on failure, drop the skb and release its PM reference */
4102 dev_kfree_skb_any(skb);
4104 usb_autopm_put_interface_async(dev->intf);
4106 netif_trans_update(dev->net);
4107 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4111 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4112 spin_unlock_irq(&dev->txq.lock);
4114 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4115 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
4116 netif_start_queue(dev->net);
4117 tasklet_schedule(&dev->bh);
/* disarm wake sources now that we are fully awake */
4121 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4122 ret = lan78xx_write_reg(dev, WUCSR, 0);
4123 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4125 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4127 WUCSR2_IPV6_TCPSYN_RCD_ |
4128 WUCSR2_IPV4_TCPSYN_RCD_);
4130 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4131 WUCSR_EEE_RX_WAKE_ |
4133 WUCSR_RFE_WAKE_FR_ |
4138 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4139 buf |= MAC_TX_TXEN_;
4140 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* Resume after a bus reset: (re-init happens on lines not visible here,)
 * restart the PHY, then run the normal resume path.
 */
4145 static int lan78xx_reset_resume(struct usb_interface *intf)
4147 struct lan78xx_net *dev = usb_get_intfdata(intf);
4151 phy_start(dev->net->phydev);
4153 return lan78xx_resume(intf);
/* USB ID table: the three supported Microchip parts. */
4156 static const struct usb_device_id products[] = {
4158 /* LAN7800 USB Gigabit Ethernet Device */
4159 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4162 /* LAN7850 USB Gigabit Ethernet Device */
4163 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4166 /* LAN7801 USB Gigabit Ethernet Device */
4167 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4171 MODULE_DEVICE_TABLE(usb, products);
/* USB driver registration: supports runtime autosuspend and disables
 * hub-initiated LPM (link power management).
 */
4173 static struct usb_driver lan78xx_driver = {
4174 .name = DRIVER_NAME,
4175 .id_table = products,
4176 .probe = lan78xx_probe,
4177 .disconnect = lan78xx_disconnect,
4178 .suspend = lan78xx_suspend,
4179 .resume = lan78xx_resume,
4180 .reset_resume = lan78xx_reset_resume,
4181 .supports_autosuspend = 1,
4182 .disable_hub_initiated_lpm = 1,
4185 module_usb_driver(lan78xx_driver);
4187 MODULE_AUTHOR(DRIVER_AUTHOR);
4188 MODULE_DESCRIPTION(DRIVER_DESC);
4189 MODULE_LICENSE("GPL");