1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2015 Microchip Technology
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/vxlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/irq.h>
27 #include <linux/irqchip/chained_irq.h>
28 #include <linux/microchipphy.h>
29 #include <linux/phy_fixed.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
34 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
35 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
36 #define DRIVER_NAME "lan78xx"
38 #define TX_TIMEOUT_JIFFIES (5 * HZ)
39 #define THROTTLE_JIFFIES (HZ / 8)
40 #define UNLINK_TIMEOUT_MS 3
42 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
44 #define SS_USB_PKT_SIZE (1024)
45 #define HS_USB_PKT_SIZE (512)
46 #define FS_USB_PKT_SIZE (64)
48 #define MAX_RX_FIFO_SIZE (12 * 1024)
49 #define MAX_TX_FIFO_SIZE (12 * 1024)
50 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
51 #define DEFAULT_BULK_IN_DELAY (0x0800)
52 #define MAX_SINGLE_PACKET_SIZE (9000)
53 #define DEFAULT_TX_CSUM_ENABLE (true)
54 #define DEFAULT_RX_CSUM_ENABLE (true)
55 #define DEFAULT_TSO_CSUM_ENABLE (true)
56 #define DEFAULT_VLAN_FILTER_ENABLE (true)
57 #define DEFAULT_VLAN_RX_OFFLOAD (true)
58 #define TX_OVERHEAD (8)
61 #define LAN78XX_USB_VENDOR_ID (0x0424)
62 #define LAN7800_USB_PRODUCT_ID (0x7800)
63 #define LAN7850_USB_PRODUCT_ID (0x7850)
64 #define LAN7801_USB_PRODUCT_ID (0x7801)
65 #define LAN78XX_EEPROM_MAGIC (0x78A5)
66 #define LAN78XX_OTP_MAGIC (0x78F3)
67 #define AT29M2AF_USB_VENDOR_ID (0x07C9)
68 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
73 #define EEPROM_INDICATOR (0xA5)
74 #define EEPROM_MAC_OFFSET (0x01)
75 #define MAX_EEPROM_SIZE 512
76 #define OTP_INDICATOR_1 (0xF3)
77 #define OTP_INDICATOR_2 (0xF7)
79 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
80 WAKE_MCAST | WAKE_BCAST | \
81 WAKE_ARP | WAKE_MAGIC)
83 /* USB related defines */
84 #define BULK_IN_PIPE 1
85 #define BULK_OUT_PIPE 2
87 /* default autosuspend delay (mSec)*/
88 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
90 /* statistic update interval (mSec) */
91 #define STAT_UPDATE_TIMER (1 * 1000)
93 /* defines interrupts from interrupt EP */
94 #define MAX_INT_EP (32)
95 #define INT_EP_INTEP (31)
96 #define INT_EP_OTP_WR_DONE (28)
97 #define INT_EP_EEE_TX_LPI_START (26)
98 #define INT_EP_EEE_TX_LPI_STOP (25)
99 #define INT_EP_EEE_RX_LPI (24)
100 #define INT_EP_MAC_RESET_TIMEOUT (23)
101 #define INT_EP_RDFO (22)
102 #define INT_EP_TXE (21)
103 #define INT_EP_USB_STATUS (20)
104 #define INT_EP_TX_DIS (19)
105 #define INT_EP_RX_DIS (18)
106 #define INT_EP_PHY (17)
107 #define INT_EP_DP (16)
108 #define INT_EP_MAC_ERR (15)
109 #define INT_EP_TDFU (14)
110 #define INT_EP_TDFO (13)
111 #define INT_EP_UTX (12)
112 #define INT_EP_GPIO_11 (11)
113 #define INT_EP_GPIO_10 (10)
114 #define INT_EP_GPIO_9 (9)
115 #define INT_EP_GPIO_8 (8)
116 #define INT_EP_GPIO_7 (7)
117 #define INT_EP_GPIO_6 (6)
118 #define INT_EP_GPIO_5 (5)
119 #define INT_EP_GPIO_4 (4)
120 #define INT_EP_GPIO_3 (3)
121 #define INT_EP_GPIO_2 (2)
122 #define INT_EP_GPIO_1 (1)
123 #define INT_EP_GPIO_0 (0)
/* ethtool -S statistic names; ordering must match the counter order in
 * struct lan78xx_statstage / lan78xx_statstage64 below.
 * NOTE(review): this extract omits some entries (source numbering skips) —
 * confirm against the full file before relying on the list being complete.
 */
125 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
127 "RX Alignment Errors",
128 "Rx Fragment Errors",
130 "RX Undersize Frame Errors",
131 "RX Oversize Frame Errors",
133 "RX Unicast Byte Count",
134 "RX Broadcast Byte Count",
135 "RX Multicast Byte Count",
137 "RX Broadcast Frames",
138 "RX Multicast Frames",
141 "RX 65 - 127 Byte Frames",
142 "RX 128 - 255 Byte Frames",
143 "RX 256 - 511 Bytes Frames",
144 "RX 512 - 1023 Byte Frames",
145 "RX 1024 - 1518 Byte Frames",
146 "RX Greater 1518 Byte Frames",
147 "EEE RX LPI Transitions",
150 "TX Excess Deferral Errors",
153 "TX Single Collisions",
154 "TX Multiple Collisions",
155 "TX Excessive Collision",
156 "TX Late Collisions",
157 "TX Unicast Byte Count",
158 "TX Broadcast Byte Count",
159 "TX Multicast Byte Count",
161 "TX Broadcast Frames",
162 "TX Multicast Frames",
165 "TX 65 - 127 Byte Frames",
166 "TX 128 - 255 Byte Frames",
167 "TX 256 - 511 Bytes Frames",
168 "TX 512 - 1023 Byte Frames",
169 "TX 1024 - 1518 Byte Frames",
170 "TX Greater 1518 Byte Frames",
171 "EEE TX LPI Transitions",
/* Raw 32-bit hardware statistics block, laid out to match the counter
 * block returned by USB_VENDOR_REQUEST_GET_STATS (read little-endian in
 * lan78xx_read_stats()). Counters are 32-bit and roll over; rollovers are
 * tracked separately in lan78xx_check_stat_rollover().
 */
175 struct lan78xx_statstage {
177 u32 rx_alignment_errors;
178 u32 rx_fragment_errors;
179 u32 rx_jabber_errors;
180 u32 rx_undersize_frame_errors;
181 u32 rx_oversize_frame_errors;
182 u32 rx_dropped_frames;
183 u32 rx_unicast_byte_count;
184 u32 rx_broadcast_byte_count;
185 u32 rx_multicast_byte_count;
186 u32 rx_unicast_frames;
187 u32 rx_broadcast_frames;
188 u32 rx_multicast_frames;
190 u32 rx_64_byte_frames;
191 u32 rx_65_127_byte_frames;
192 u32 rx_128_255_byte_frames;
193 u32 rx_256_511_bytes_frames;
194 u32 rx_512_1023_byte_frames;
195 u32 rx_1024_1518_byte_frames;
196 u32 rx_greater_1518_byte_frames;
197 u32 eee_rx_lpi_transitions;
200 u32 tx_excess_deferral_errors;
201 u32 tx_carrier_errors;
202 u32 tx_bad_byte_count;
203 u32 tx_single_collisions;
204 u32 tx_multiple_collisions;
205 u32 tx_excessive_collision;
206 u32 tx_late_collisions;
207 u32 tx_unicast_byte_count;
208 u32 tx_broadcast_byte_count;
209 u32 tx_multicast_byte_count;
210 u32 tx_unicast_frames;
211 u32 tx_broadcast_frames;
212 u32 tx_multicast_frames;
214 u32 tx_64_byte_frames;
215 u32 tx_65_127_byte_frames;
216 u32 tx_128_255_byte_frames;
217 u32 tx_256_511_bytes_frames;
218 u32 tx_512_1023_byte_frames;
219 u32 tx_1024_1518_byte_frames;
220 u32 tx_greater_1518_byte_frames;
221 u32 eee_tx_lpi_transitions;
/* 64-bit accumulated statistics: same member order as lan78xx_statstage so
 * both can be walked in lockstep as flat u32/u64 arrays (see
 * lan78xx_update_stats(), which computes raw + rollover_count * (max + 1)).
 */
225 struct lan78xx_statstage64 {
227 u64 rx_alignment_errors;
228 u64 rx_fragment_errors;
229 u64 rx_jabber_errors;
230 u64 rx_undersize_frame_errors;
231 u64 rx_oversize_frame_errors;
232 u64 rx_dropped_frames;
233 u64 rx_unicast_byte_count;
234 u64 rx_broadcast_byte_count;
235 u64 rx_multicast_byte_count;
236 u64 rx_unicast_frames;
237 u64 rx_broadcast_frames;
238 u64 rx_multicast_frames;
240 u64 rx_64_byte_frames;
241 u64 rx_65_127_byte_frames;
242 u64 rx_128_255_byte_frames;
243 u64 rx_256_511_bytes_frames;
244 u64 rx_512_1023_byte_frames;
245 u64 rx_1024_1518_byte_frames;
246 u64 rx_greater_1518_byte_frames;
247 u64 eee_rx_lpi_transitions;
250 u64 tx_excess_deferral_errors;
251 u64 tx_carrier_errors;
252 u64 tx_bad_byte_count;
253 u64 tx_single_collisions;
254 u64 tx_multiple_collisions;
255 u64 tx_excessive_collision;
256 u64 tx_late_collisions;
257 u64 tx_unicast_byte_count;
258 u64 tx_broadcast_byte_count;
259 u64 tx_multicast_byte_count;
260 u64 tx_unicast_frames;
261 u64 tx_broadcast_frames;
262 u64 tx_multicast_frames;
264 u64 tx_64_byte_frames;
265 u64 tx_65_127_byte_frames;
266 u64 tx_128_255_byte_frames;
267 u64 tx_256_511_bytes_frames;
268 u64 tx_512_1023_byte_frames;
269 u64 tx_1024_1518_byte_frames;
270 u64 tx_greater_1518_byte_frames;
271 u64 eee_tx_lpi_transitions;
275 static u32 lan78xx_regs[] = {
297 #define PHY_REG_SIZE (32 * sizeof(u32))
/* Driver-private state hung off lan78xx_net (stored in dev->data[0]);
 * holds the receive-filter tables that are flushed to hardware from the
 * deferred set_multicast/set_vlan work items.
 * NOTE(review): extract omits trailing members (source numbering skips).
 */
301 struct lan78xx_priv {
302 struct lan78xx_net *dev;
304 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
305 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
306 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
307 struct mutex dataport_mutex; /* for dataport access */
308 spinlock_t rfe_ctl_lock; /* for rfe register access */
309 struct work_struct set_multicast;
310 struct work_struct set_vlan;
/* Per-URB bookkeeping stored in skb->cb. */
324 struct skb_data { /* skb->cb is one of these */
326 struct lan78xx_net *dev;
327 enum skb_state state;
/* Context for deferred USB control requests. */
333 struct usb_ctrlrequest req;
334 struct lan78xx_net *dev;
/* Bit numbers for dev->flags, serviced by the kevent work handler. */
337 #define EVENT_TX_HALT 0
338 #define EVENT_RX_HALT 1
339 #define EVENT_RX_MEMORY 2
340 #define EVENT_STS_SPLIT 3
341 #define EVENT_LINK_RESET 4
342 #define EVENT_RX_PAUSED 5
343 #define EVENT_DEV_WAKING 6
344 #define EVENT_DEV_ASLEEP 7
345 #define EVENT_DEV_OPEN 8
346 #define EVENT_STAT_UPDATE 9
/* Statistics container: last raw snapshot, rollover counts/limits, and
 * the 64-bit accumulated values, all guarded by access_lock.
 */
349 struct mutex access_lock; /* for stats access */
350 struct lan78xx_statstage saved;
351 struct lan78xx_statstage rollover_count;
352 struct lan78xx_statstage rollover_max;
353 struct lan78xx_statstage64 curr_stat;
/* IRQ domain state used to expose the chip's interrupt sources (e.g. PHY). */
356 struct irq_domain_data {
357 struct irq_domain *irqdomain;
359 struct irq_chip *irqchip;
360 irq_flow_handler_t irq_handler;
362 struct mutex irq_lock; /* for irq bus access */
/* Main per-device state (struct lan78xx_net fields).
 * NOTE(review): many fields are missing from this extract.
 */
366 struct net_device *net;
367 struct usb_device *udev;
368 struct usb_interface *intf;
373 struct sk_buff_head rxq;
374 struct sk_buff_head txq;
375 struct sk_buff_head done;
376 struct sk_buff_head rxq_pause;
377 struct sk_buff_head txq_pend;
379 struct tasklet_struct bh;
380 struct delayed_work wq;
384 struct urb *urb_intr;
385 struct usb_anchor deferred;
387 struct mutex phy_mutex; /* for phy access */
388 unsigned pipe_in, pipe_out, pipe_intr;
390 u32 hard_mtu; /* count any extra framing */
391 size_t rx_urb_size; /* size for rx urbs */
395 wait_queue_head_t *wait;
396 unsigned char suspend_count;
399 struct timer_list delay;
400 struct timer_list stat_monitor;
402 unsigned long data[5];
409 struct mii_bus *mdiobus;
410 phy_interface_t interface;
413 u8 fc_request_control;
416 struct statstage stats;
418 struct irq_domain_data domain_data;
421 /* define external phy id */
422 #define PHY_LAN8835 (0x0007C130)
423 #define PHY_KSZ9031RNX (0x00221620)
425 /* use ethtool to change the level for any given device */
426 static int msg_level = -1;
427 module_param(msg_level, int, 0);
428 MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read a 32-bit device register via a USB vendor control-IN transfer.
 * A DMA-safe bounce buffer is kmalloc'd rather than passing *data directly.
 * NOTE(review): extract omits the allocation check, endian fixup of *data,
 * kfree and return — confirm against the full source.
 */
430 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
432 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
438 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
439 USB_VENDOR_REQUEST_READ_REGISTER,
440 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
441 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
442 if (likely(ret >= 0)) {
446 netdev_warn(dev->net,
447 "Failed to read register index 0x%08x. ret = %d",
/* Write a 32-bit device register via a USB vendor control-OUT transfer,
 * using a kmalloc'd DMA-safe bounce buffer for the payload.
 * NOTE(review): extract omits the buffer fill/endian conversion, kfree and
 * return — confirm against the full source.
 */
456 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
458 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
467 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
468 USB_VENDOR_REQUEST_WRITE_REGISTER,
469 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
470 0, index, buf, 4, USB_CTRL_SET_TIMEOUT),
471 if (unlikely(ret < 0)) {
472 netdev_warn(dev->net,
473 "Failed to write register index 0x%08x. ret = %d",
/* Fetch the full hardware statistics block in one vendor control-IN
 * transfer, then convert each counter from little-endian in place before
 * it is copied out to *data.
 */
482 static int lan78xx_read_stats(struct lan78xx_net *dev,
483 struct lan78xx_statstage *data)
487 struct lan78xx_statstage *stats;
491 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
495 ret = usb_control_msg(dev->udev,
496 usb_rcvctrlpipe(dev->udev, 0),
497 USB_VENDOR_REQUEST_GET_STATS,
498 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
503 USB_CTRL_SET_TIMEOUT);
504 if (likely(ret >= 0)) {
/* Counters arrive little-endian; fix up word by word. */
507 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
508 le32_to_cpus(&src[i]);
512 netdev_warn(dev->net,
513 "Failed to read stat ret = %d", ret);
/* If the new raw 32-bit reading is below the previously saved one, the
 * hardware counter wrapped: bump the per-member rollover count.
 */
521 #define check_counter_rollover(struct1, dev_stats, member) { \
522 if (struct1->member < dev_stats.saved.member) \
523 dev_stats.rollover_count.member++; \
/* Compare a fresh raw counter snapshot against the last saved snapshot,
 * accumulating rollovers for every member, then save the new snapshot as
 * the baseline for the next comparison.
 */
526 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
527 struct lan78xx_statstage *stats)
529 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
530 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
531 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
532 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
533 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
534 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
535 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
536 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
537 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
538 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
539 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
540 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
541 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
542 check_counter_rollover(stats, dev->stats, rx_pause_frames);
543 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
544 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
545 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
546 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
547 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
548 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
549 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
550 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
551 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
552 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
553 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
554 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
555 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
556 check_counter_rollover(stats, dev->stats, tx_single_collisions);
557 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
558 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
559 check_counter_rollover(stats, dev->stats, tx_late_collisions);
560 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
561 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
562 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
563 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
564 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
565 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
566 check_counter_rollover(stats, dev->stats, tx_pause_frames);
567 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
568 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
569 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
570 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
571 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
572 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
573 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
574 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
575 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
/* New snapshot becomes the baseline for the next rollover check. */
577 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh dev->stats.curr_stat: read the raw counters, record rollovers,
 * then rebuild each 64-bit value as raw + rollovers * (rollover_max + 1).
 * Walks the stat structs as flat u32/u64 arrays, which relies on
 * lan78xx_statstage and lan78xx_statstage64 having identical member order.
 * Holds stats.access_lock around the update; takes an autopm reference for
 * the USB traffic and bails out silently if the interface can't be resumed.
 */
580 static void lan78xx_update_stats(struct lan78xx_net *dev)
582 u32 *p, *count, *max;
585 struct lan78xx_statstage lan78xx_stats;
587 if (usb_autopm_get_interface(dev->intf) < 0)
590 p = (u32 *)&lan78xx_stats;
591 count = (u32 *)&dev->stats.rollover_count;
592 max = (u32 *)&dev->stats.rollover_max;
593 data = (u64 *)&dev->stats.curr_stat;
595 mutex_lock(&dev->stats.access_lock);
597 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
598 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
600 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
601 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
603 mutex_unlock(&dev->stats.access_lock);
605 usb_autopm_put_interface(dev->intf);
608 /* Loop until the read is completed with timeout called with phy_mutex held */
/* Polls MII_ACC until the BUSY bit clears, giving up after ~1s (HZ jiffies).
 * NOTE(review): extract omits the return statements — the success/timeout
 * return values are not visible here.
 */
609 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
611 unsigned long start_time = jiffies;
616 ret = lan78xx_read_reg(dev, MII_ACC, &val);
617 if (unlikely(ret < 0))
620 if (!(val & MII_ACC_MII_BUSY_))
622 } while (!time_after(jiffies, start_time + HZ));
/* Build a MII_ACC register value for a PHY register access: encodes the
 * PHY address and register index, selects read or write, and sets the
 * BUSY bit so the hardware starts the transaction when written.
 */
627 static inline u32 mii_access(int id, int index, int read)
631 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
632 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
634 ret |= MII_ACC_MII_READ_;
636 ret |= MII_ACC_MII_WRITE_;
637 ret |= MII_ACC_MII_BUSY_;
/* Poll E2P_CMD until the EEPROM controller is idle or reports a timeout,
 * sleeping 40-100us between polls and giving up after ~1s. Returns an
 * error (warning logged) if BUSY is still set or the controller timed out.
 */
642 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
644 unsigned long start_time = jiffies;
649 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
650 if (unlikely(ret < 0))
653 if (!(val & E2P_CMD_EPC_BUSY_) ||
654 (val & E2P_CMD_EPC_TIMEOUT_))
656 usleep_range(40, 100);
657 } while (!time_after(jiffies, start_time + HZ));
659 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
660 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Confirm the EEPROM controller is idle before issuing a new command:
 * poll E2P_CMD for the BUSY bit with 40-100us sleeps, up to ~1s, warning
 * and failing if it never clears.
 */
667 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
669 unsigned long start_time = jiffies;
674 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
675 if (unlikely(ret < 0))
678 if (!(val & E2P_CMD_EPC_BUSY_))
681 usleep_range(40, 100);
682 } while (!time_after(jiffies, start_time + HZ));
684 netdev_warn(dev->net, "EEPROM is busy");
/* Read 'length' bytes starting at 'offset' from the EEPROM, one byte per
 * E2P_CMD READ transaction. On LAN7800 the EEPROM pins are muxed with the
 * LEDs, so LED0/LED1 are disabled in HW_CFG for the duration and the saved
 * value restored afterwards.
 */
688 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
689 u32 length, u8 *data)
696 /* depends on chip, some EEPROM pins are muxed with LED function.
697 * disable & restore LED function to access EEPROM.
699 ret = lan78xx_read_reg(dev, HW_CFG, &val);
701 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
702 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
703 ret = lan78xx_write_reg(dev, HW_CFG, val);
706 retval = lan78xx_eeprom_confirm_not_busy(dev);
710 for (i = 0; i < length; i++) {
711 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
712 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
713 ret = lan78xx_write_reg(dev, E2P_CMD, val);
714 if (unlikely(ret < 0)) {
719 retval = lan78xx_wait_eeprom(dev);
723 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
724 if (unlikely(ret < 0)) {
/* Each E2P_DATA read yields one byte in the low 8 bits. */
729 data[i] = val & 0xFF;
735 if (dev->chipid == ID_REV_CHIP_ID_7800_)
736 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Validated EEPROM read: only proceeds when byte 0 carries the
 * EEPROM_INDICATOR (0xA5) signature, otherwise the read is skipped.
 * NOTE(review): extract omits the failure return path.
 */
741 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
742 u32 length, u8 *data)
747 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
748 if ((ret == 0) && (sig == EEPROM_INDICATOR))
749 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write 'length' bytes to the EEPROM: issue a single EWEN (write enable)
 * command, then one E2P_DATA fill + WRITE command per byte, waiting for
 * the controller between steps. LED pin muxing on LAN7800 is handled the
 * same way as in lan78xx_read_raw_eeprom().
 */
756 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
757 u32 length, u8 *data)
764 /* depends on chip, some EEPROM pins are muxed with LED function.
765 * disable & restore LED function to access EEPROM.
767 ret = lan78xx_read_reg(dev, HW_CFG, &val);
769 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
770 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
771 ret = lan78xx_write_reg(dev, HW_CFG, val);
774 retval = lan78xx_eeprom_confirm_not_busy(dev);
778 /* Issue write/erase enable command */
779 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
780 ret = lan78xx_write_reg(dev, E2P_CMD, val);
781 if (unlikely(ret < 0)) {
786 retval = lan78xx_wait_eeprom(dev);
790 for (i = 0; i < length; i++) {
791 /* Fill data register */
793 ret = lan78xx_write_reg(dev, E2P_DATA, val);
799 /* Send "write" command */
800 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
801 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
802 ret = lan78xx_write_reg(dev, E2P_CMD, val);
808 retval = lan78xx_wait_eeprom(dev);
/* Restore the LED configuration saved before the write sequence. */
817 if (dev->chipid == ID_REV_CHIP_ID_7800_)
818 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read 'length' bytes from on-chip OTP memory. Powers the OTP block up
 * first if OTP_PWR_DN indicates it is down, then per byte: program the
 * split address registers (high bits in OTP_ADDR1, low in OTP_ADDR2),
 * issue a READ via OTP_FUNC_CMD + OTP_CMD_GO, poll OTP_STATUS until not
 * busy (~1s timeout), and collect the byte from OTP_RD_DATA.
 */
823 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
824 u32 length, u8 *data)
829 unsigned long timeout;
831 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
833 if (buf & OTP_PWR_DN_PWRDN_N_) {
834 /* clear it and wait to be cleared */
835 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
837 timeout = jiffies + HZ;
840 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
841 if (time_after(jiffies, timeout)) {
842 netdev_warn(dev->net,
843 "timeout on OTP_PWR_DN");
846 } while (buf & OTP_PWR_DN_PWRDN_N_);
849 for (i = 0; i < length; i++) {
850 ret = lan78xx_write_reg(dev, OTP_ADDR1,
851 ((offset + i) >> 8) & OTP_ADDR1_15_11);
852 ret = lan78xx_write_reg(dev, OTP_ADDR2,
853 ((offset + i) & OTP_ADDR2_10_3));
855 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
856 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
858 timeout = jiffies + HZ;
861 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
862 if (time_after(jiffies, timeout)) {
863 netdev_warn(dev->net,
864 "timeout on OTP_STATUS");
867 } while (buf & OTP_STATUS_BUSY_);
869 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
871 data[i] = (u8)(buf & 0xFF);
/* Program 'length' bytes into OTP memory. Mirrors the read path's
 * power-up handling, switches the controller to BYTE program mode, then
 * per byte: set the address registers, load OTP_PRGM_DATA, trigger a
 * program-and-verify via OTP_TST_CMD + OTP_CMD_GO, and poll OTP_STATUS
 * until not busy (~1s timeout per byte).
 */
877 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
878 u32 length, u8 *data)
883 unsigned long timeout;
885 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
887 if (buf & OTP_PWR_DN_PWRDN_N_) {
888 /* clear it and wait to be cleared */
889 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
891 timeout = jiffies + HZ;
894 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
895 if (time_after(jiffies, timeout)) {
896 netdev_warn(dev->net,
897 "timeout on OTP_PWR_DN completion");
900 } while (buf & OTP_PWR_DN_PWRDN_N_);
903 /* set to BYTE program mode */
904 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
906 for (i = 0; i < length; i++) {
907 ret = lan78xx_write_reg(dev, OTP_ADDR1,
908 ((offset + i) >> 8) & OTP_ADDR1_15_11);
909 ret = lan78xx_write_reg(dev, OTP_ADDR2,
910 ((offset + i) & OTP_ADDR2_10_3));
911 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
912 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
913 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
915 timeout = jiffies + HZ;
918 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
919 if (time_after(jiffies, timeout)) {
920 netdev_warn(dev->net,
921 "Timeout on OTP_STATUS completion");
924 } while (buf & OTP_STATUS_BUSY_);
/* Validated OTP read: checks the signature byte at offset 0 first.
 * OTP_INDICATOR_2 (0xF7) selects an alternate layout (adjustment not
 * visible in this extract); anything other than OTP_INDICATOR_1 (0xF3)
 * rejects the read.
 */
930 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
931 u32 length, u8 *data)
936 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
939 if (sig == OTP_INDICATOR_2)
941 else if (sig != OTP_INDICATOR_1)
944 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Wait for the internal dataport to become ready: poll DP_SEL for the
 * DPRDY bit up to 100 times with 40-100us sleeps, warning on timeout.
 */
950 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
954 for (i = 0; i < 100; i++) {
957 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
958 if (unlikely(ret < 0))
961 if (dp_sel & DP_SEL_DPRDY_)
964 usleep_range(40, 100);
967 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write 'length' words from buf into the selected internal RAM bank via
 * the dataport: select the RAM in DP_SEL, then per word program DP_ADDR,
 * DP_DATA and issue DP_CMD_WRITE_, waiting for ready between writes.
 * Serialized by pdata->dataport_mutex; holds an autopm reference and
 * bails out silently if the interface can't be resumed.
 */
972 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
973 u32 addr, u32 length, u32 *buf)
975 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
979 if (usb_autopm_get_interface(dev->intf) < 0)
982 mutex_lock(&pdata->dataport_mutex);
984 ret = lan78xx_dataport_wait_not_busy(dev);
988 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
990 dp_sel &= ~DP_SEL_RSEL_MASK_;
991 dp_sel |= ram_select;
992 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
994 for (i = 0; i < length; i++) {
995 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
997 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
999 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1001 ret = lan78xx_dataport_wait_not_busy(dev);
1007 mutex_unlock(&pdata->dataport_mutex);
1008 usb_autopm_put_interface(dev->intf);
/* Stage a MAC address into perfect-filter slot 'index' (slot 0 is the
 * device's own address, so index must be 1..NUM_OF_MAF-1). The address
 * bytes are packed big-endian style into the LO word (addr[0..3]) and HI
 * word (addr[4..5] plus VALID and TYPE_DST flags); the table is written
 * to hardware later by lan78xx_deferred_multicast_write().
 */
1013 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1014 int index, u8 addr[ETH_ALEN])
1018 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1020 temp = addr[2] | (temp << 8);
1021 temp = addr[1] | (temp << 8);
1022 temp = addr[0] | (temp << 8);
1023 pdata->pfilter_table[index][1] = temp;
1025 temp = addr[4] | (temp << 8);
1026 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1027 pdata->pfilter_table[index][0] = temp;
1031 /* returns hash bit number for given MAC address */
/* 9-bit hash: bits 31..23 of the Ethernet CRC of the address. */
1032 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1034 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Work handler (pdata->set_multicast): flush the staged multicast hash
 * table to device RAM via the dataport, rewrite every perfect-filter MAF
 * entry (HI cleared first so the slot is never half-valid: HI=0, then LO,
 * then HI with the VALID bit), and finally commit rfe_ctl to RFE_CTL.
 * Runs in process context because the register writes may sleep.
 */
1037 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1039 struct lan78xx_priv *pdata =
1040 container_of(param, struct lan78xx_priv, set_multicast);
1041 struct lan78xx_net *dev = pdata->dev;
1045 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1048 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1049 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1051 for (i = 1; i < NUM_OF_MAF; i++) {
1052 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1053 ret = lan78xx_write_reg(dev, MAF_LO(i),
1054 pdata->pfilter_table[i][1]);
1055 ret = lan78xx_write_reg(dev, MAF_HI(i),
1056 pdata->pfilter_table[i][0]);
1059 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode handler. Rebuilds the staged receive-filter state
 * (rfe_ctl flags, multicast hash table, perfect-filter table) under the
 * rfe_ctl spinlock, honouring IFF_PROMISC / IFF_ALLMULTI, placing the
 * first multicast addresses into perfect-filter slots and overflowing the
 * rest into the 512-bit hash. Hardware writes are deferred to
 * lan78xx_deferred_multicast_write() since this runs in atomic context.
 */
1062 static void lan78xx_set_multicast(struct net_device *netdev)
1064 struct lan78xx_net *dev = netdev_priv(netdev);
1065 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1066 unsigned long flags;
1069 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1071 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1072 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1074 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1075 pdata->mchash_table[i] = 0;
1076 /* pfilter_table[0] has own HW address */
1077 for (i = 1; i < NUM_OF_MAF; i++) {
1078 pdata->pfilter_table[i][0] =
1079 pdata->pfilter_table[i][1] = 0;
1082 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1084 if (dev->net->flags & IFF_PROMISC) {
1085 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1086 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1088 if (dev->net->flags & IFF_ALLMULTI) {
1089 netif_dbg(dev, drv, dev->net,
1090 "receive all multicast enabled");
1091 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1095 if (netdev_mc_count(dev->net)) {
1096 struct netdev_hw_addr *ha;
1099 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1101 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1104 netdev_for_each_mc_addr(ha, netdev) {
1105 /* set first 32 into Perfect Filter */
1107 lan78xx_set_addr_filter(pdata, i, ha->addr);
/* Perfect-filter slots exhausted: fall back to the hash table. */
1109 u32 bitnum = lan78xx_hash(ha->addr);
1111 pdata->mchash_table[bitnum / 32] |=
1112 (1 << (bitnum % 32));
1113 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1119 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1121 /* defer register writes to a sleepable context */
1122 schedule_work(&pdata->set_multicast);
/* Program pause-frame flow control after link-up: resolve the pause
 * capabilities (from autoneg advertisements when fc_autoneg, otherwise
 * from the user-requested fc_request_control), build the FLOW register
 * value (TX enable carries a 0xFFFF pause-time field), and write the
 * FCT_FLOW thresholds before enabling FLOW as the comment requires.
 * NOTE(review): the USB-speed-dependent fct_flow values are on lines
 * missing from this extract.
 */
1125 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1126 u16 lcladv, u16 rmtadv)
1128 u32 flow = 0, fct_flow = 0;
1132 if (dev->fc_autoneg)
1133 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1135 cap = dev->fc_request_control;
1137 if (cap & FLOW_CTRL_TX)
1138 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1140 if (cap & FLOW_CTRL_RX)
1141 flow |= FLOW_CR_RX_FCEN_;
1143 if (dev->udev->speed == USB_SPEED_SUPER)
1145 else if (dev->udev->speed == USB_SPEED_HIGH)
1148 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1149 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1150 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1152 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1154 /* threshold value should be set before enabling flow */
1155 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link-state change (scheduled via EVENT_LINK_RESET): clear
 * the PHY interrupt status, sample phydev->link under phydev->lock, and
 * act on transitions. Link down: touch MAC_CR and stop the stat-monitor
 * timer. Link up: tune USB U1/U2 LPM for SuperSpeed (only U1 at 1Gbps,
 * both otherwise), read local/partner advertisements, reprogram flow
 * control, restart the stats timer and kick the tasklet.
 * NOTE(review): several lines (MAC_CR bit edits, error checks on
 * ladv/radv) are missing from this extract.
 */
1160 static int lan78xx_link_reset(struct lan78xx_net *dev)
1162 struct phy_device *phydev = dev->net->phydev;
1163 struct ethtool_link_ksettings ecmd;
1164 int ladv, radv, ret, link;
1167 /* clear LAN78xx interrupt status */
1168 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1169 if (unlikely(ret < 0))
1172 mutex_lock(&phydev->lock);
1173 phy_read_status(phydev);
1174 link = phydev->link;
1175 mutex_unlock(&phydev->lock);
1177 if (!link && dev->link_on) {
1178 dev->link_on = false;
1181 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1182 if (unlikely(ret < 0))
1185 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1186 if (unlikely(ret < 0))
/* No link: statistics polling is pointless until it returns. */
1189 del_timer(&dev->stat_monitor);
1190 } else if (link && !dev->link_on) {
1191 dev->link_on = true;
1193 phy_ethtool_ksettings_get(phydev, &ecmd);
1195 if (dev->udev->speed == USB_SPEED_SUPER) {
1196 if (ecmd.base.speed == 1000) {
1198 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1199 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1200 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1202 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1203 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1204 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1206 /* enable U1 & U2 */
1207 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1208 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1209 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1210 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1214 ladv = phy_read(phydev, MII_ADVERTISE);
1218 radv = phy_read(phydev, MII_LPA);
1222 netif_dbg(dev, link, dev->net,
1223 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1224 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1226 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1229 if (!timer_pending(&dev->stat_monitor)) {
1231 mod_timer(&dev->stat_monitor,
1232 jiffies + STAT_UPDATE_TIMER);
1235 tasklet_schedule(&dev->bh);
1241 /* some work can't be done in tasklets, so we use keventd
1243 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1244 * but tasklet_schedule() doesn't. hope the failure is rare.
/* Set the event bit in dev->flags and kick the kevent work; the event is
 * not lost if scheduling fails (the bit stays set), but we log it.
 */
1246 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1248 set_bit(work, &dev->flags);
1249 if (!schedule_delayed_work(&dev->wq, 0))
1250 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint URB completion: expects exactly 4 bytes of
 * little-endian status. On a PHY interrupt, defer a link reset and, if a
 * phy irq domain is wired up, dispatch the mapped interrupt with local
 * irqs disabled; any other status bits are logged as unexpected.
 */
1253 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1257 if (urb->actual_length != 4) {
1258 netdev_warn(dev->net,
1259 "unexpected urb length %d", urb->actual_length);
1263 intdata = get_unaligned_le32(urb->transfer_buffer);
1265 if (intdata & INT_ENP_PHY_INT) {
1266 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1267 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1269 if (dev->domain_data.phyirq > 0) {
1270 local_irq_disable();
1271 generic_handle_irq(dev->domain_data.phyirq);
1275 netdev_warn(dev->net,
1276 "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool get_eeprom_len: report the fixed maximum EEPROM size (512). */
1279 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1281 return MAX_EEPROM_SIZE;
/* ethtool get_eeprom: resume the interface (autopm), stamp the driver's
 * EEPROM magic, and read the raw bytes for the requested offset/length.
 */
1284 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1285 struct ethtool_eeprom *ee, u8 *data)
1287 struct lan78xx_net *dev = netdev_priv(netdev);
1290 ret = usb_autopm_get_interface(dev->intf);
1294 ee->magic = LAN78XX_EEPROM_MAGIC;
1296 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1298 usb_autopm_put_interface(dev->intf);
/* ethtool set_eeprom: dispatch on the magic — EEPROM magic writes raw
 * EEPROM; OTP magic additionally requires offset 0 and a valid
 * OTP_INDICATOR_1 signature byte before programming OTP.
 */
1303 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1304 struct ethtool_eeprom *ee, u8 *data)
1306 struct lan78xx_net *dev = netdev_priv(netdev);
1309 ret = usb_autopm_get_interface(dev->intf);
1313 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1314 * to load data from EEPROM
1316 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1317 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1318 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1319 (ee->offset == 0) &&
1321 (data[0] == OTP_INDICATOR_1))
1322 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1324 usb_autopm_put_interface(dev->intf);
/* ethtool get_strings: copy out the statistic name table for ETH_SS_STATS. */
1329 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1332 if (stringset == ETH_SS_STATS)
1333 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
/* ethtool get_sset_count: number of stat strings for ETH_SS_STATS.
 * NOTE(review): the non-stats return path is not visible in this extract.
 */
1336 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1338 if (sset == ETH_SS_STATS)
1339 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool get_ethtool_stats: refresh the accumulated counters from
 * hardware, then copy curr_stat out under the stats lock so the snapshot
 * is internally consistent.
 */
1344 static void lan78xx_get_stats(struct net_device *netdev,
1345 struct ethtool_stats *stats, u64 *data)
1347 struct lan78xx_net *dev = netdev_priv(netdev);
1349 lan78xx_update_stats(dev);
1351 mutex_lock(&dev->stats.access_lock);
1352 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1353 mutex_unlock(&dev->stats.access_lock);
/* ethtool get_wol: if USB_CFG0 reports remote-wakeup capability, report
 * WAKE_ALL as supported and the currently configured pdata->wol options.
 * Needs the device resumed (autopm) to read the register; returns
 * silently if resume fails or the register read errors out.
 */
1356 static void lan78xx_get_wol(struct net_device *netdev,
1357 struct ethtool_wolinfo *wol)
1359 struct lan78xx_net *dev = netdev_priv(netdev);
1362 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1364 if (usb_autopm_get_interface(dev->intf) < 0)
1367 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1368 if (unlikely(ret < 0)) {
1372 if (buf & USB_CFG_RMT_WKP_) {
1373 wol->supported = WAKE_ALL;
1374 wol->wolopts = pdata->wol;
1381 usb_autopm_put_interface(dev->intf);
/* ethtool set_wol: reject option bits outside WAKE_ALL, store the chosen
 * options, toggle the USB device's wakeup enable to match whether any
 * option is set, and propagate the WoL settings to the PHY.
 */
1384 static int lan78xx_set_wol(struct net_device *netdev,
1385 struct ethtool_wolinfo *wol)
1387 struct lan78xx_net *dev = netdev_priv(netdev);
1388 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1391 ret = usb_autopm_get_interface(dev->intf);
1395 if (wol->wolopts & ~WAKE_ALL)
1398 pdata->wol = wol->wolopts;
1400 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1402 phy_ethtool_set_wol(netdev->phydev, wol);
1404 usb_autopm_put_interface(dev->intf);
/* ethtool .get_eee: report Energy-Efficient-Ethernet state. PHY-level
 * data comes from phy_ethtool_get_eee(); MAC-level enablement is gated
 * on MAC_CR_EEE_EN_. eee_active is derived from the overlap between
 * local and link-partner advertisements.
 */
1409 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1411 struct lan78xx_net *dev = netdev_priv(net);
1412 struct phy_device *phydev = net->phydev;
1416 ret = usb_autopm_get_interface(dev->intf);
1420 ret = phy_ethtool_get_eee(phydev, edata);
1424 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1425 if (buf & MAC_CR_EEE_EN_) {
1426 edata->eee_enabled = true;
1427 edata->eee_active = !!(edata->advertised &
1428 edata->lp_advertised);
1429 edata->tx_lpi_enabled = true;
1430 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1431 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1432 edata->tx_lpi_timer = buf;
/* elided else branch: report EEE fully disabled */
1434 edata->eee_enabled = false;
1435 edata->eee_active = false;
1436 edata->tx_lpi_enabled = false;
1437 edata->tx_lpi_timer = 0;
1442 usb_autopm_put_interface(dev->intf);
/* ethtool .set_eee: enable path sets MAC_CR_EEE_EN_, forwards the
 * request to the PHY and programs the LPI request delay; the disable
 * path (elided else) clears MAC_CR_EEE_EN_. Register read/write return
 * codes are assigned but not checked.
 */
1447 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1449 struct lan78xx_net *dev = netdev_priv(net);
1453 ret = usb_autopm_get_interface(dev->intf);
1457 if (edata->eee_enabled) {
1458 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1459 buf |= MAC_CR_EEE_EN_;
1460 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1462 phy_ethtool_set_eee(net->phydev, edata);
/* tx_lpi_timer is in microseconds, same unit as EEE_TX_LPI_REQ_DLY */
1464 buf = (u32)edata->tx_lpi_timer;
1465 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1467 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1468 buf &= ~MAC_CR_EEE_EN_;
1469 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1472 usb_autopm_put_interface(dev->intf);
/* ethtool .get_link: poll the PHY for fresh link status under
 * phydev->lock and return it.
 */
1477 static u32 lan78xx_get_link(struct net_device *net)
1481 mutex_lock(&net->phydev->lock);
1482 phy_read_status(net->phydev);
1483 link = net->phydev->link;
1484 mutex_unlock(&net->phydev->lock);
/* ethtool .get_drvinfo: report driver name and USB bus path.
 * NOTE(review): strncpy() does not guarantee NUL termination; it is
 * safe here only because DRIVER_NAME ("lan78xx") is much shorter than
 * info->driver — strscpy() would be the more robust kernel idiom.
 */
1489 static void lan78xx_get_drvinfo(struct net_device *net,
1490 struct ethtool_drvinfo *info)
1492 struct lan78xx_net *dev = netdev_priv(net);
1494 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1495 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
/* ethtool .get_msglevel: return the netif message-level bitmap */
1498 static u32 lan78xx_get_msglevel(struct net_device *net)
1500 struct lan78xx_net *dev = netdev_priv(net);
1502 return dev->msg_enable;
/* ethtool .set_msglevel: store the netif message-level bitmap */
1505 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1507 struct lan78xx_net *dev = netdev_priv(net);
1509 dev->msg_enable = level;
/* ethtool .get_link_ksettings: delegate to phylib while holding a USB
 * autopm reference.
 */
1512 static int lan78xx_get_link_ksettings(struct net_device *net,
1513 struct ethtool_link_ksettings *cmd)
1515 struct lan78xx_net *dev = netdev_priv(net);
1516 struct phy_device *phydev = net->phydev;
1519 ret = usb_autopm_get_interface(dev->intf);
1523 phy_ethtool_ksettings_get(phydev, cmd);
1525 usb_autopm_put_interface(dev->intf);
/* ethtool .set_link_ksettings: apply speed/duplex via phylib. For forced
 * (non-autoneg) modes the link is bounced by briefly setting
 * BMCR_LOOPBACK and then restoring BMCR, so the peer re-syncs at the
 * new setting. An elided delay presumably sits between the two writes —
 * TODO confirm.
 */
1530 static int lan78xx_set_link_ksettings(struct net_device *net,
1531 const struct ethtool_link_ksettings *cmd)
1533 struct lan78xx_net *dev = netdev_priv(net);
1534 struct phy_device *phydev = net->phydev;
1538 ret = usb_autopm_get_interface(dev->intf);
1542 /* change speed & duplex */
1543 ret = phy_ethtool_ksettings_set(phydev, cmd);
1545 if (!cmd->base.autoneg) {
1546 /* force link down */
1547 temp = phy_read(phydev, MII_BMCR);
1548 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1550 phy_write(phydev, MII_BMCR, temp);
1553 usb_autopm_put_interface(dev->intf);
/* ethtool .get_pauseparam: report the driver's *requested* flow-control
 * configuration (fc_request_control / fc_autoneg), not the negotiated
 * result.
 */
1558 static void lan78xx_get_pause(struct net_device *net,
1559 struct ethtool_pauseparam *pause)
1561 struct lan78xx_net *dev = netdev_priv(net);
1562 struct phy_device *phydev = net->phydev;
1563 struct ethtool_link_ksettings ecmd;
1565 phy_ethtool_ksettings_get(phydev, &ecmd);
1567 pause->autoneg = dev->fc_autoneg;
1569 if (dev->fc_request_control & FLOW_CTRL_TX)
1570 pause->tx_pause = 1;
1572 if (dev->fc_request_control & FLOW_CTRL_RX)
1573 pause->rx_pause = 1;
/* ethtool .set_pauseparam: record the requested RX/TX pause bits and,
 * when link autoneg is active, refresh the advertised Pause/Asym_Pause
 * link modes and re-apply the settings. Requesting pause autoneg while
 * link autoneg is off is rejected (elided error path).
 */
1576 static int lan78xx_set_pause(struct net_device *net,
1577 struct ethtool_pauseparam *pause)
1579 struct lan78xx_net *dev = netdev_priv(net);
1580 struct phy_device *phydev = net->phydev;
1581 struct ethtool_link_ksettings ecmd;
1584 phy_ethtool_ksettings_get(phydev, &ecmd);
1586 if (pause->autoneg && !ecmd.base.autoneg) {
1591 dev->fc_request_control = 0;
1592 if (pause->rx_pause)
1593 dev->fc_request_control |= FLOW_CTRL_RX;
1595 if (pause->tx_pause)
1596 dev->fc_request_control |= FLOW_CTRL_TX;
1598 if (ecmd.base.autoneg) {
1599 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
/* drop stale pause bits from the advertisement, then OR in the bits
 * corresponding to the freshly requested flow control
 */
1602 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1603 ecmd.link_modes.advertising);
1604 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1605 ecmd.link_modes.advertising);
1606 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1607 mii_adv_to_linkmode_adv_t(fc, mii_adv);
1608 linkmode_or(ecmd.link_modes.advertising, fc,
1609 ecmd.link_modes.advertising);
1611 phy_ethtool_ksettings_set(phydev, &ecmd);
1614 dev->fc_autoneg = pause->autoneg;
/* ethtool .get_regs_len: MAC register dump size, plus PHY_REG_SIZE when
 * a PHY is attached (get_regs appends 32 PHY registers in that case).
 */
1621 static int lan78xx_get_regs_len(struct net_device *netdev)
1623 if (!netdev->phydev)
1624 return (sizeof(lan78xx_regs));
1626 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
/* ethtool .get_regs: dump the MAC registers listed in lan78xx_regs[],
 * then (if a PHY is attached) the 32 standard PHY registers. data[]
 * indexing continues from i so the PHY block lands directly after the
 * MAC block, matching get_regs_len above.
 */
1630 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1635 struct lan78xx_net *dev = netdev_priv(netdev);
1637 /* Read Device/MAC registers */
1638 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1639 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1641 if (!netdev->phydev)
1644 /* Read PHY registers */
1645 for (j = 0; j < 32; i++, j++)
1646 data[i] = phy_read(netdev->phydev, j);
/* ethtool operations table wiring the handlers above into the core */
1649 static const struct ethtool_ops lan78xx_ethtool_ops = {
1650 .get_link = lan78xx_get_link,
1651 .nway_reset = phy_ethtool_nway_reset,
1652 .get_drvinfo = lan78xx_get_drvinfo,
1653 .get_msglevel = lan78xx_get_msglevel,
1654 .set_msglevel = lan78xx_set_msglevel,
1655 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1656 .get_eeprom = lan78xx_ethtool_get_eeprom,
1657 .set_eeprom = lan78xx_ethtool_set_eeprom,
1658 .get_ethtool_stats = lan78xx_get_stats,
1659 .get_sset_count = lan78xx_get_sset_count,
1660 .get_strings = lan78xx_get_strings,
1661 .get_wol = lan78xx_get_wol,
1662 .set_wol = lan78xx_set_wol,
1663 .get_eee = lan78xx_get_eee,
1664 .set_eee = lan78xx_set_eee,
1665 .get_pauseparam = lan78xx_get_pause,
1666 .set_pauseparam = lan78xx_set_pause,
1667 .get_link_ksettings = lan78xx_get_link_ksettings,
1668 .set_link_ksettings = lan78xx_set_link_ksettings,
1669 .get_regs_len = lan78xx_get_regs_len,
1670 .get_regs = lan78xx_get_regs,
/* Establish the MAC address at init time. Keep the address already in
 * RX_ADDRL/RX_ADDRH if it is valid; otherwise try, in order,
 * platform/device-tree, EEPROM, OTP, and finally a random address. The
 * chosen address is programmed back into the receive-address registers
 * and perfect-filter slot 0 (MAF), and copied into net->dev_addr.
 */
1673 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1675 u32 addr_lo, addr_hi;
1679 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1680 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* registers hold the address little-endian: lo = bytes 0-3, hi = 4-5 */
1682 addr[0] = addr_lo & 0xFF;
1683 addr[1] = (addr_lo >> 8) & 0xFF;
1684 addr[2] = (addr_lo >> 16) & 0xFF;
1685 addr[3] = (addr_lo >> 24) & 0xFF;
1686 addr[4] = addr_hi & 0xFF;
1687 addr[5] = (addr_hi >> 8) & 0xFF;
1689 if (!is_valid_ether_addr(addr)) {
1690 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1691 /* valid address present in Device Tree */
1692 netif_dbg(dev, ifup, dev->net,
1693 "MAC address read from Device Tree");
1694 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1695 ETH_ALEN, addr) == 0) ||
1696 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1697 ETH_ALEN, addr) == 0)) &&
1698 is_valid_ether_addr(addr)) {
1699 /* eeprom values are valid so use them */
1700 netif_dbg(dev, ifup, dev->net,
1701 "MAC address read from EEPROM");
1703 /* generate random MAC */
1704 eth_random_addr(addr);
1705 netif_dbg(dev, ifup, dev->net,
1706 "MAC address set to random addr");
1709 addr_lo = addr[0] | (addr[1] << 8) |
1710 (addr[2] << 16) | (addr[3] << 24);
1711 addr_hi = addr[4] | (addr[5] << 8);
1713 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1714 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* mirror the address into perfect-filter slot 0 and mark it valid */
1717 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1718 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1720 ether_addr_copy(dev->net->dev_addr, addr);
1723 /* MDIO read and write wrappers for phylib */
/* Perform one MII_ACC read transaction under phy_mutex with a USB
 * autopm reference held: wait for the MII engine to go idle, issue the
 * read command, wait for completion, then fetch MII_DATA. Elided error
 * paths jump past the read and return the error — TODO confirm labels.
 */
1724 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1726 struct lan78xx_net *dev = bus->priv;
1730 ret = usb_autopm_get_interface(dev->intf);
1734 mutex_lock(&dev->phy_mutex);
1736 /* confirm MII not busy */
1737 ret = lan78xx_phy_wait_not_busy(dev);
1741 /* set the address, index & direction (read from PHY) */
1742 addr = mii_access(phy_id, idx, MII_READ);
1743 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1745 ret = lan78xx_phy_wait_not_busy(dev);
1749 ret = lan78xx_read_reg(dev, MII_DATA, &val);
/* phylib expects the 16-bit register value as a non-negative int */
1751 ret = (int)(val & 0xFFFF);
1754 mutex_unlock(&dev->phy_mutex);
1755 usb_autopm_put_interface(dev->intf);
/* MDIO write wrapper for phylib: mirror of lan78xx_mdiobus_read().
 * Writes MII_DATA first, then issues the MII_ACC write command and
 * waits for the engine to go idle, all under phy_mutex with a USB
 * autopm reference held.
 */
1760 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1763 struct lan78xx_net *dev = bus->priv;
1767 ret = usb_autopm_get_interface(dev->intf);
1771 mutex_lock(&dev->phy_mutex);
1773 /* confirm MII not busy */
1774 ret = lan78xx_phy_wait_not_busy(dev);
1779 ret = lan78xx_write_reg(dev, MII_DATA, val);
1781 /* set the address, index & direction (write to PHY) */
1782 addr = mii_access(phy_id, idx, MII_WRITE);
1783 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1785 ret = lan78xx_phy_wait_not_busy(dev);
1790 mutex_unlock(&dev->phy_mutex);
1791 usb_autopm_put_interface(dev->intf);
/* Allocate and register the device's MDIO bus. Internal-PHY chips
 * (LAN7800/LAN7850) restrict the scan mask to PHY address 1; LAN7801
 * scans external addresses PHYAD[2..0]. Registration honors an
 * optional "mdio" device-tree child node. On failure the bus is freed
 * via the elided error label before returning.
 */
1795 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1797 struct device_node *node;
1800 dev->mdiobus = mdiobus_alloc();
1801 if (!dev->mdiobus) {
1802 netdev_err(dev->net, "can't allocate MDIO bus\n");
1806 dev->mdiobus->priv = (void *)dev;
1807 dev->mdiobus->read = lan78xx_mdiobus_read;
1808 dev->mdiobus->write = lan78xx_mdiobus_write;
1809 dev->mdiobus->name = "lan78xx-mdiobus";
1810 dev->mdiobus->parent = &dev->udev->dev;
/* bus id is derived from the USB topology so it is unique per device */
1812 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1813 dev->udev->bus->busnum, dev->udev->devnum);
1815 switch (dev->chipid) {
1816 case ID_REV_CHIP_ID_7800_:
1817 case ID_REV_CHIP_ID_7850_:
1818 /* set to internal PHY id */
1819 dev->mdiobus->phy_mask = ~(1 << 1);
1821 case ID_REV_CHIP_ID_7801_:
1822 /* scan thru PHYAD[2..0] */
1823 dev->mdiobus->phy_mask = ~(0xFF);
1827 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1828 ret = of_mdiobus_register(dev->mdiobus, node);
1831 netdev_err(dev->net, "can't register MDIO bus\n");
1835 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* elided error label: free the allocated bus on registration failure */
1838 mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus registered by lan78xx_mdio_init() */
1842 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1844 mdiobus_unregister(dev->mdiobus);
1845 mdiobus_free(dev->mdiobus);
/* phylib link-change callback. Its visible body is a hardware
 * workaround: at forced 100M the PHY is first dropped to 10M and then
 * back to 100M (with the PHY interrupt masked and pending status
 * cleared around the toggle) so the chip latches the mode correctly.
 */
1848 static void lan78xx_link_status_change(struct net_device *net)
1850 struct phy_device *phydev = net->phydev;
1853 /* At forced 100 F/H mode, chip may fail to set mode correctly
1854 * when cable is switched between long(~50+m) and short one.
1855 * As workaround, set to 10 before setting to 100
1856 * at forced 100 F/H mode.
1858 if (!phydev->autoneg && (phydev->speed == 100)) {
1859 /* disable phy interrupt */
1860 temp = phy_read(phydev, LAN88XX_INT_MASK);
1861 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1862 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1864 temp = phy_read(phydev, MII_BMCR);
1865 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1866 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1867 temp |= BMCR_SPEED100;
1868 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1870 /* clear pending interrupt generated while workaround */
1871 temp = phy_read(phydev, LAN88XX_INT_STS);
1873 /* enable phy interrupt back */
1874 temp = phy_read(phydev, LAN88XX_INT_MASK);
1875 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1876 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
/* irq_domain .map: attach our chip data/handler to a freshly mapped
 * virtual IRQ.
 */
1880 static int irq_map(struct irq_domain *d, unsigned int irq,
1881 irq_hw_number_t hwirq)
1883 struct irq_domain_data *data = d->host_data;
1885 irq_set_chip_data(irq, data);
1886 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1887 irq_set_noprobe(irq);
/* irq_domain .unmap: undo irq_map() */
1892 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1894 irq_set_chip_and_handler(irq, NULL, NULL);
1895 irq_set_chip_data(irq, NULL);
/* irq_domain ops table; the .map/.unmap initializers are elided here */
1898 static const struct irq_domain_ops chip_domain_ops = {
/* irq_chip .irq_mask: clear the bit in the cached enable mask; the
 * hardware register is written later in irq_bus_sync_unlock.
 */
1903 static void lan78xx_irq_mask(struct irq_data *irqd)
1905 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1907 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
/* irq_chip .irq_unmask: set the bit in the cached enable mask; the
 * hardware register is written later in irq_bus_sync_unlock.
 */
1910 static void lan78xx_irq_unmask(struct irq_data *irqd)
1912 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1914 data->irqenable |= BIT(irqd_to_hwirq(irqd));
/* irq_chip .irq_bus_lock: serialize slow-bus (USB) register updates */
1917 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1919 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1921 mutex_lock(&data->irq_lock);
/* irq_chip .irq_bus_sync_unlock: flush the cached enable mask to the
 * INT_EP_CTL register (skipped if unchanged), then drop irq_lock. USB
 * register access sleeps, which is why it happens here rather than in
 * the atomic mask/unmask callbacks.
 */
1924 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1926 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1927 struct lan78xx_net *dev =
1928 container_of(data, struct lan78xx_net, domain_data);
1932 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1933 * are only two callbacks executed in non-atomic contex.
1935 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1936 if (buf != data->irqenable)
1937 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1939 mutex_unlock(&data->irq_lock);
/* irq_chip implementing the device's INT_EP_CTL enable register via the
 * slow-bus lock/sync-unlock protocol above
 */
1942 static struct irq_chip lan78xx_irqchip = {
1943 .name = "lan78xx-irqs",
1944 .irq_mask = lan78xx_irq_mask,
1945 .irq_unmask = lan78xx_irq_unmask,
1946 .irq_bus_lock = lan78xx_irq_bus_lock,
1947 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
/* Create a simple IRQ domain modelling the device's interrupt-enable
 * register and map the PHY interrupt (INT_EP_PHY). The current
 * INT_EP_CTL value seeds the cached enable mask. On mapping failure the
 * domain is removed (elided error path).
 */
1950 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1952 struct device_node *of_node;
1953 struct irq_domain *irqdomain;
1954 unsigned int irqmap = 0;
1958 of_node = dev->udev->dev.parent->of_node;
1960 mutex_init(&dev->domain_data.irq_lock);
1962 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1963 dev->domain_data.irqenable = buf;
1965 dev->domain_data.irqchip = &lan78xx_irqchip;
1966 dev->domain_data.irq_handler = handle_simple_irq;
1968 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1969 &chip_domain_ops, &dev->domain_data);
1971 /* create mapping for PHY interrupt */
1972 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1974 irq_domain_remove(irqdomain);
1983 dev->domain_data.irqdomain = irqdomain;
1984 dev->domain_data.phyirq = irqmap;
1989 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1991 if (dev->domain_data.phyirq > 0) {
1992 irq_dispose_mapping(dev->domain_data.phyirq);
1994 if (dev->domain_data.irqdomain)
1995 irq_domain_remove(dev->domain_data.irqdomain);
1997 dev->domain_data.phyirq = 0;
1998 dev->domain_data.irqdomain = NULL;
/* PHY fixup for the LAN8835 (LAN7801 designs): route the shared
 * LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N, enable the MAC's RGMII TXC
 * delay and tune the TX DLL, then record RGMII_TXID interface mode.
 */
2001 static int lan8835_fixup(struct phy_device *phydev)
2005 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2007 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2008 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2011 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2013 /* RGMII MAC TXC Delay Enable */
2014 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2015 MAC_RGMII_ID_TXC_DELAY_EN_);
2017 /* RGMII TX DLL Tune Adjust */
2018 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2020 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
/* PHY fixup for the Micrel KSZ9031RNX (LAN7801 designs): program RGMII
 * pad-skew registers over MMD and record RGMII_RXID interface mode.
 */
2025 static int ksz9031rnx_fixup(struct phy_device *phydev)
2027 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2029 /* Micrel9301RNX PHY configuration */
2030 /* RGMII Control Signal Pad Skew */
2031 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2032 /* RGMII RX Data Pad Skew */
2033 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2034 /* RGMII RX Clock Pad Skew */
2035 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2037 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* LAN7801-specific PHY setup: find a PHY on the MDIO bus; if none is
 * present, register a 1G full-duplex fixed PHY and enable the MAC-side
 * RGMII clocks/delays. For a real external PHY, register the
 * KSZ9031RNX and LAN8835 fixups. Returns the phy_device (NULL/error on
 * the elided failure paths).
 */
2042 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2046 struct fixed_phy_status fphy_status = {
2048 .speed = SPEED_1000,
2049 .duplex = DUPLEX_FULL,
2051 struct phy_device *phydev;
2053 phydev = phy_find_first(dev->mdiobus);
2055 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2056 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2057 if (IS_ERR(phydev)) {
2058 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2061 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2062 dev->interface = PHY_INTERFACE_MODE_RGMII;
2063 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2064 MAC_RGMII_ID_TXC_DELAY_EN_);
2065 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2066 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2067 buf |= HW_CFG_CLK125_EN_;
2068 buf |= HW_CFG_REFCLK25_EN_;
2069 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* elided else branch: a PHY was found on the bus */
2072 netdev_err(dev->net, "no PHY driver found\n");
2075 dev->interface = PHY_INTERFACE_MODE_RGMII;
2076 /* external PHY fixup for KSZ9031RNX */
2077 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2080 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2083 /* external PHY fixup for LAN8835 */
2084 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2087 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2090 /* add more external PHY fixup here if needed */
2092 phydev->is_internal = false;
/* Locate and attach the PHY for the detected chip, connect it to
 * phylib with lan78xx_link_status_change as the link callback, set up
 * flow-control advertisement, optional DT-driven LED enables, and kick
 * off autonegotiation. LAN7801 error paths also unwind the fixed PHY
 * and the PHY fixups registered by lan7801_phy_init().
 */
2097 static int lan78xx_phy_init(struct lan78xx_net *dev)
2099 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2102 struct phy_device *phydev;
2104 switch (dev->chipid) {
2105 case ID_REV_CHIP_ID_7801_:
2106 phydev = lan7801_phy_init(dev);
2108 netdev_err(dev->net, "lan7801: PHY Init Failed");
2113 case ID_REV_CHIP_ID_7800_:
2114 case ID_REV_CHIP_ID_7850_:
2115 phydev = phy_find_first(dev->mdiobus);
2117 netdev_err(dev->net, "no PHY found\n");
2120 phydev->is_internal = true;
2121 dev->interface = PHY_INTERFACE_MODE_GMII;
2125 netdev_err(dev->net, "Unknown CHIP ID found\n");
2129 /* if phyirq is not set, use polling mode in phylib */
2130 if (dev->domain_data.phyirq > 0)
2131 phydev->irq = dev->domain_data.phyirq;
2133 phydev->irq = PHY_POLL;
2134 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2136 /* set to AUTOMDIX */
2137 phydev->mdix = ETH_TP_MDI_AUTO;
2139 ret = phy_connect_direct(dev->net, phydev,
2140 lan78xx_link_status_change,
2143 netdev_err(dev->net, "can't attach PHY to %s\n",
/* connect failed: LAN7801 must also drop the fixed PHY / fixups */
2145 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2146 if (phy_is_pseudo_fixed_link(phydev)) {
2147 fixed_phy_unregister(phydev);
2149 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2151 phy_unregister_fixup_for_uid(PHY_LAN8835,
2158 /* MAC doesn't support 1000T Half */
2159 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2161 /* support both flow controls */
2162 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2163 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2164 phydev->advertising);
2165 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2166 phydev->advertising);
2167 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2168 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2169 linkmode_or(phydev->advertising, fc, phydev->advertising);
2171 if (phydev->mdio.dev.of_node) {
2175 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2176 "microchip,led-modes",
2179 /* Ensure the appropriate LEDs are enabled */
2180 lan78xx_read_reg(dev, HW_CFG, &reg);
2181 reg &= ~(HW_CFG_LED0_EN_ |
/* enable one LED per entry in the DT "microchip,led-modes" property */
2185 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2186 (len > 1) * HW_CFG_LED1_EN_ |
2187 (len > 2) * HW_CFG_LED2_EN_ |
2188 (len > 3) * HW_CFG_LED3_EN_;
2189 lan78xx_write_reg(dev, HW_CFG, reg);
2193 genphy_config_aneg(phydev);
2195 dev->fc_autoneg = phydev->autoneg;
/* Program the MAC_RX maximum frame size (size + 4 bytes for FCS). If
 * the receiver was enabled it is disabled around the change and
 * re-enabled afterwards, as required for a consistent update.
 */
2200 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2206 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2208 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
/* elided: the disable/re-enable writes below only run if rxenabled */
2211 buf &= ~MAC_RX_RXEN_;
2212 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2215 /* add 4 to size for FCS */
2216 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2217 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2219 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2222 buf |= MAC_RX_RXEN_;
2223 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every not-yet-unlinked URB queued on q and
 * return how many unlinks were issued (count increment is elided).
 * The queue lock is dropped around usb_unlink_urb() because URB
 * completion handlers also take it; a reference on the URB is held
 * across the call (elided usb_get_urb/usb_put_urb — TODO confirm).
 */
2229 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2231 struct sk_buff *skb;
2232 unsigned long flags;
2235 spin_lock_irqsave(&q->lock, flags);
2236 while (!skb_queue_empty(q)) {
2237 struct skb_data *entry;
/* find the first entry not already being unlinked; restart each pass */
2241 skb_queue_walk(q, skb) {
2242 entry = (struct skb_data *)skb->cb;
2243 if (entry->state != unlink_start)
2248 entry->state = unlink_start;
2251 /* Get reference count of the URB to avoid it to be
2252 * freed during usb_unlink_urb, which may trigger
2253 * use-after-free problem inside usb_unlink_urb since
2254 * usb_unlink_urb is always racing with .complete
2255 * handler(include defer_bh).
2258 spin_unlock_irqrestore(&q->lock, flags);
2259 /* during some PM-driven resume scenarios,
2260 * these (async) unlinks complete immediately
2262 ret = usb_unlink_urb(urb);
2263 if (ret != -EINPROGRESS && ret != 0)
2264 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2268 spin_lock_irqsave(&q->lock, flags);
2270 spin_unlock_irqrestore(&q->lock, flags);
2274 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2276 struct lan78xx_net *dev = netdev_priv(netdev);
2277 int ll_mtu = new_mtu + netdev->hard_header_len;
2278 int old_hard_mtu = dev->hard_mtu;
2279 int old_rx_urb_size = dev->rx_urb_size;
2282 /* no second zero-length packet read wanted after mtu-sized packets */
2283 if ((ll_mtu % dev->maxpacket) == 0)
2286 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2288 netdev->mtu = new_mtu;
2290 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2291 if (dev->rx_urb_size == old_hard_mtu) {
2292 dev->rx_urb_size = dev->hard_mtu;
2293 if (dev->rx_urb_size > old_rx_urb_size) {
2294 if (netif_running(dev->net)) {
2295 unlink_urbs(dev, &dev->rxq);
2296 tasklet_schedule(&dev->bh);
/* ndo_set_mac_address: refuse changes while the interface is running
 * (elided early return — TODO confirm errno) and reject invalid
 * addresses, then program RX_ADDRL/H and mirror the address into
 * perfect-filter slot 0, matching lan78xx_init_mac_address().
 */
2304 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2306 struct lan78xx_net *dev = netdev_priv(netdev);
2307 struct sockaddr *addr = p;
2308 u32 addr_lo, addr_hi;
2311 if (netif_running(netdev))
2314 if (!is_valid_ether_addr(addr->sa_data))
2315 return -EADDRNOTAVAIL;
2317 ether_addr_copy(netdev->dev_addr, addr->sa_data);
/* pack bytes little-endian into the two address registers */
2319 addr_lo = netdev->dev_addr[0] |
2320 netdev->dev_addr[1] << 8 |
2321 netdev->dev_addr[2] << 16 |
2322 netdev->dev_addr[3] << 24;
2323 addr_hi = netdev->dev_addr[4] |
2324 netdev->dev_addr[5] << 8;
2326 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2327 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2329 /* Added to support MAC address changes */
2330 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2331 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2336 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate NETIF_F_RXCSUM and the VLAN CTAG
 * strip/filter feature bits into the cached RFE_CTL value (mutated
 * under rfe_ctl_lock), then write it to hardware outside the spinlock.
 */
2337 static int lan78xx_set_features(struct net_device *netdev,
2338 netdev_features_t features)
2340 struct lan78xx_net *dev = netdev_priv(netdev);
2341 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2342 unsigned long flags;
2345 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2347 if (features & NETIF_F_RXCSUM) {
2348 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2349 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2351 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2352 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2355 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2356 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2358 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2360 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2361 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2363 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2365 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
/* USB register write sleeps, so it must happen after dropping the lock */
2367 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* Workqueue callback: flush the software VLAN filter table to the
 * device's dataport. Deferred because dataport writes sleep.
 */
2372 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2374 struct lan78xx_priv *pdata =
2375 container_of(param, struct lan78xx_priv, set_vlan);
2376 struct lan78xx_net *dev = pdata->dev;
2378 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2379 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the VID's bit in the software filter table
 * (one bit per VID, 32 VIDs per u32 word) and schedule the deferred
 * hardware write.
 */
2382 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2383 __be16 proto, u16 vid)
2385 struct lan78xx_net *dev = netdev_priv(netdev);
2386 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2388 u16 vid_dword_index;
2390 vid_dword_index = (vid >> 5) & 0x7F;
2391 vid_bit_index = vid & 0x1F;
2393 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2395 /* defer register writes to a sleepable context */
2396 schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the VID's bit in the software filter
 * table and schedule the deferred hardware write (mirror of
 * lan78xx_vlan_rx_add_vid).
 */
2401 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2402 __be16 proto, u16 vid)
2404 struct lan78xx_net *dev = netdev_priv(netdev);
2405 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2407 u16 vid_dword_index;
2409 vid_dword_index = (vid >> 5) & 0x7F;
2410 vid_bit_index = vid & 0x1F;
2412 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2414 /* defer register writes to a sleepable context */
2415 schedule_work(&pdata->set_vlan);
/* Program the six USB LTM (Latency Tolerance Messaging) registers. When
 * LTM is enabled in USB_CFG1, a 24-byte override blob may be loaded
 * from EEPROM or OTP (located via word 0x3F); otherwise regs[] keeps
 * its zero/default contents from the elided paths — TODO confirm the
 * elided fallback values.
 */
2420 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2424 u32 regs[6] = { 0 };
2426 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2427 if (buf & USB_CFG1_LTM_ENABLE_) {
2429 /* Get values from EEPROM first */
2430 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2431 if (temp[0] == 24) {
2432 ret = lan78xx_read_raw_eeprom(dev,
2439 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2440 if (temp[0] == 24) {
2441 ret = lan78xx_read_raw_otp(dev,
2451 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2452 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2453 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2454 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2455 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2456 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Full device (re)initialization: soft-reset the chip, restore the MAC
 * address, cache the chip ID/revision, size the bulk-in burst/queues by
 * USB speed, set FIFO thresholds, program receive filtering and
 * checksum offloads, reset the PHY, and finally enable the TX/RX MAC
 * and FIFO controllers. Called from probe/open/resume paths.
 */
2459 static int lan78xx_reset(struct lan78xx_net *dev)
2461 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2464 unsigned long timeout;
/* issue a Lite Reset and poll (with 1s timeout) for self-clear */
2467 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2468 buf |= HW_CFG_LRST_;
2469 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2471 timeout = jiffies + HZ;
2474 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2475 if (time_after(jiffies, timeout)) {
2476 netdev_warn(dev->net,
2477 "timeout on completion of LiteReset");
2480 } while (buf & HW_CFG_LRST_);
2482 lan78xx_init_mac_address(dev);
2484 /* save DEVID for later usage */
2485 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2486 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2487 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2489 /* Respond to the IN token with a NAK */
2490 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2491 buf |= USB_CFG_BIR_;
2492 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2495 lan78xx_init_ltm(dev);
/* burst cap and URB/queue sizing depend on the negotiated USB speed */
2497 if (dev->udev->speed == USB_SPEED_SUPER) {
2498 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2499 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2502 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2503 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2504 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2505 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2506 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2508 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2509 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2514 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2515 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2517 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2519 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* enable burst cap */
2521 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2522 buf |= USB_CFG_BCE_;
2523 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2525 /* set FIFO sizes */
2526 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2527 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2529 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2530 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
/* clear all pending interrupts and disable flow control initially */
2532 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2533 ret = lan78xx_write_reg(dev, FLOW, 0);
2534 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2536 /* Don't need rfe_ctl_lock during initialisation */
2537 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2538 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2539 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2541 /* Enable or disable checksum offload engines */
2542 lan78xx_set_features(dev->net, dev->net->features);
2544 lan78xx_set_multicast(dev->net);
/* reset the PHY and poll (with 1s timeout) until it is ready */
2547 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2548 buf |= PMT_CTL_PHY_RST_;
2549 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2551 timeout = jiffies + HZ;
2554 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2555 if (time_after(jiffies, timeout)) {
2556 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2559 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2561 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2562 /* LAN7801 only has RGMII mode */
2563 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2564 buf &= ~MAC_CR_GMII_EN_;
2566 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2567 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2568 if (!ret && sig != EEPROM_INDICATOR) {
2569 /* Implies there is no external eeprom. Set mac speed */
2570 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2571 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2574 ret = lan78xx_write_reg(dev, MAC_CR, buf);
/* enable MAC TX then the TX FIFO controller */
2576 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2577 buf |= MAC_TX_TXEN_;
2578 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2580 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2581 buf |= FCT_TX_CTL_EN_;
2582 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2584 ret = lan78xx_set_rx_max_frame_length(dev,
2585 dev->net->mtu + VLAN_ETH_HLEN);
/* enable MAC RX then the RX FIFO controller */
2587 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2588 buf |= MAC_RX_RXEN_;
2589 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2591 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2592 buf |= FCT_RX_CTL_EN_;
2593 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
/* Initialize rollover thresholds for the statistics machinery: default
 * every counter's max via the elided loop body (20-bit counters), then
 * override the byte/time counters that are full 32-bit, and arm the
 * periodic stat update.
 */
2598 static void lan78xx_init_stats(struct lan78xx_net *dev)
2603 /* initialize for stats update
2604 * some counters are 20bits and some are 32bits
2606 p = (u32 *)&dev->stats.rollover_max;
2607 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
/* these counters are full 32-bit; override the default rollover max */
2610 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2611 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2612 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2613 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2614 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2615 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2616 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2617 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2618 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2619 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2621 set_bit(EVENT_STAT_UPDATE, &dev->flags);
/* ndo_open: take a PM reference, start the PHY, submit the interrupt
 * URB used for link-change notification, initialize stats, mark the
 * device open and kick a deferred link reset. The autopm reference is
 * dropped again before returning (runtime PM is re-armed per-transfer).
 */
2624 static int lan78xx_open(struct net_device *net)
2626 struct lan78xx_net *dev = netdev_priv(net);
2629 ret = usb_autopm_get_interface(dev->intf);
2633 phy_start(net->phydev);
2635 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2637 /* for Link Check */
2638 if (dev->urb_intr) {
2639 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2641 netif_err(dev, ifup, dev->net,
2642 "intr submit %d\n", ret);
2647 lan78xx_init_stats(dev);
2649 set_bit(EVENT_DEV_OPEN, &dev->flags);
2651 netif_start_queue(net);
/* force a link (re)evaluation through the deferred kevent worker */
2653 dev->link_on = false;
2655 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2657 usb_autopm_put_interface(dev->intf);
/* lan78xx_terminate_urbs - unlink all in-flight RX/TX URBs and wait for
 * their completions to drain. The on-stack wait queue is signalled by
 * the completion path via dev->wait.
 */
2663 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2665 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2666 DECLARE_WAITQUEUE(wait, current);
2669 /* ensure there are no more active urbs */
2670 add_wait_queue(&unlink_wakeup, &wait);
2671 set_current_state(TASK_UNINTERRUPTIBLE);
2672 dev->wait = &unlink_wakeup;
2673 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2675 /* maybe wait for deletions to finish. */
/* NOTE(review): the '&&' chain means this loop exits as soon as ANY of
 * rxq/txq/done becomes empty; waiting until all three have drained
 * would require '||' — confirm against the intended unlink semantics.
 */
2676 while (!skb_queue_empty(&dev->rxq) &&
2677 !skb_queue_empty(&dev->txq) &&
2678 !skb_queue_empty(&dev->done)) {
2679 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2680 set_current_state(TASK_UNINTERRUPTIBLE);
2681 netif_dbg(dev, ifdown, dev->net,
2682 "waited for %d urb completions\n", temp);
2684 set_current_state(TASK_RUNNING);
2686 remove_wait_queue(&unlink_wakeup, &wait);
/* lan78xx_stop - ndo_stop callback: quiesce the interface.
 * Stops the stats timer and PHY, drains URBs, kills the interrupt URB,
 * flushes paused RX skbs and stops deferred work, then drops the autopm
 * reference taken in lan78xx_open().
 */
2689 static int lan78xx_stop(struct net_device *net)
2691 struct lan78xx_net *dev = netdev_priv(net);
2693 if (timer_pending(&dev->stat_monitor))
2694 del_timer_sync(&dev->stat_monitor);
2697 phy_stop(net->phydev);
2699 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2700 netif_stop_queue(net);
2702 netif_info(dev, ifdown, dev->net,
2703 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2704 net->stats.rx_packets, net->stats.tx_packets,
2705 net->stats.rx_errors, net->stats.tx_errors);
2707 lan78xx_terminate_urbs(dev);
2709 usb_kill_urb(dev->urb_intr);
2711 skb_queue_purge(&dev->rxq_pause);
2713 /* deferred work (task, timer, softirq) must also stop.
2714 * can't flush_scheduled_work() until we drop rtnl (later),
2715 * else workers could deadlock; so make workers a NOP.
2718 cancel_delayed_work_sync(&dev->wq);
2719 tasklet_kill(&dev->bh);
2721 usb_autopm_put_interface(dev->intf);
/* lan78xx_tx_prep - prepend the 8-byte hardware TX command words to an skb.
 * Builds tx_cmd_a (length, FCS, checksum/LSO flags) and tx_cmd_b
 * (MSS, VLAN tag) and pushes them little-endian in front of the payload.
 * Frees the skb and returns NULL-equivalent on headroom/linearize failure
 * (the return statements are not visible in this extraction).
 */
2726 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2727 struct sk_buff *skb, gfp_t flags)
2729 u32 tx_cmd_a, tx_cmd_b;
/* make room for the two command words; on failure the skb is consumed */
2732 if (skb_cow_head(skb, TX_OVERHEAD)) {
2733 dev_kfree_skb_any(skb);
2737 if (skb_linearize(skb)) {
2738 dev_kfree_skb_any(skb);
2742 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2744 if (skb->ip_summed == CHECKSUM_PARTIAL)
2745 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2748 if (skb_is_gso(skb)) {
2749 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2751 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2753 tx_cmd_a |= TX_CMD_A_LSO_;
/* NOTE(review): tx_cmd_b is OR'ed below for VLAN; its zero-initialisation
 * for the !GSO case is not visible in this extraction — confirm.
 */
2756 if (skb_vlan_tag_present(skb)) {
2757 tx_cmd_a |= TX_CMD_A_IVTG_;
2758 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2761 ptr = skb_push(skb, 8);
2762 put_unaligned_le32(tx_cmd_a, ptr);
2763 put_unaligned_le32(tx_cmd_b, ptr + 4);
/* defer_bh - move an skb from an active queue to dev->done and kick the
 * bottom-half tasklet when the done queue transitions from empty.
 * Returns the skb's previous state so the caller can detect races
 * (e.g. unlink_start in rx_complete()).
 */
2768 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2769 struct sk_buff_head *list, enum skb_state state)
2771 unsigned long flags;
2772 enum skb_state old_state;
2773 struct skb_data *entry = (struct skb_data *)skb->cb;
/* irqsave on the source list; irqs stay disabled across the hand-off to
 * dev->done and are only restored by the final unlock_irqrestore below.
 */
2775 spin_lock_irqsave(&list->lock, flags);
2776 old_state = entry->state;
2777 entry->state = state;
2779 __skb_unlink(skb, list);
2780 spin_unlock(&list->lock);
2781 spin_lock(&dev->done.lock);
2783 __skb_queue_tail(&dev->done, skb);
/* only schedule on the empty->non-empty transition to avoid rescheduling */
2784 if (skb_queue_len(&dev->done) == 1)
2785 tasklet_schedule(&dev->bh);
2786 spin_unlock_irqrestore(&dev->done.lock, flags);
/* tx_complete - URB completion handler for bulk-out transfers.
 * Updates TX counters on success, classifies errors (deferring an
 * EVENT_TX_HALT on pipe errors), releases the async autopm reference
 * taken at submit time and hands the skb to the bottom half as tx_done.
 * Runs in interrupt context.
 */
2791 static void tx_complete(struct urb *urb)
2793 struct sk_buff *skb = (struct sk_buff *)urb->context;
2794 struct skb_data *entry = (struct skb_data *)skb->cb;
2795 struct lan78xx_net *dev = entry->dev;
2797 if (urb->status == 0) {
2798 dev->net->stats.tx_packets += entry->num_of_packet;
2799 dev->net->stats.tx_bytes += entry->length;
2801 dev->net->stats.tx_errors++;
2803 switch (urb->status) {
/* endpoint stalled: let the kevent worker clear the halt */
2805 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2808 /* software-driven interface shutdown */
2816 netif_stop_queue(dev->net);
2819 netif_dbg(dev, tx_err, dev->net,
2820 "tx err %d\n", entry->urb->status);
/* pairs with usb_autopm_get_interface_async() in lan78xx_tx_bh() */
2825 usb_autopm_put_interface_async(dev->intf);
2827 defer_bh(dev, skb, &dev->txq, tx_done);
/* lan78xx_queue_skb - append an skb to a driver queue and record its
 * lifecycle state in the skb control block. Caller must hold the
 * queue lock (uses the unlocked __skb_queue_tail variant).
 */
2830 static void lan78xx_queue_skb(struct sk_buff_head *list,
2831 struct sk_buff *newsk, enum skb_state state)
2833 struct skb_data *entry = (struct skb_data *)newsk->cb;
2835 __skb_queue_tail(list, newsk);
2836 entry->state = state;
/* lan78xx_start_xmit - ndo_start_xmit callback.
 * Timestamps the skb, prepends the HW command words via lan78xx_tx_prep()
 * and queues the result on txq_pend for the bottom half to batch and
 * submit. Throttles the netdev queue on sub-SuperSpeed links when the
 * pending queue grows past 10 packets. Always returns NETDEV_TX_OK
 * (the skb is either queued or counted as dropped).
 */
2840 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2842 struct lan78xx_net *dev = netdev_priv(net);
2843 struct sk_buff *skb2 = NULL;
2846 skb_tx_timestamp(skb);
2847 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2851 skb_queue_tail(&dev->txq_pend, skb2);
2853 /* throttle TX patch at slower than SUPER SPEED USB */
2854 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2855 (skb_queue_len(&dev->txq_pend) > 10))
2856 netif_stop_queue(net);
/* tx_prep failed: skb already freed, account the drop */
2858 netif_dbg(dev, tx_err, dev->net,
2859 "lan78xx_tx_prep return NULL\n");
2860 dev->net->stats.tx_errors++;
2861 dev->net->stats.tx_dropped++;
2864 tasklet_schedule(&dev->bh);
2866 return NETDEV_TX_OK;
/* lan78xx_bind - one-time driver/device binding.
 * Allocates the private data (stored in dev->data[0]), initialises locks
 * and deferred-work items, selects the default netdev feature set, sets
 * up the IRQ domain, resets the chip, initialises MDIO and picks the
 * default WoL mode. Error-path labels (visible at the tail) unwind the
 * IRQ domain and cancel queued work.
 */
2869 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2871 struct lan78xx_priv *pdata = NULL;
2875 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2877 pdata = (struct lan78xx_priv *)(dev->data[0]);
2879 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2885 spin_lock_init(&pdata->rfe_ctl_lock);
2886 mutex_init(&pdata->dataport_mutex);
2888 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
/* start with an empty VLAN filter table */
2890 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2891 pdata->vlan_table[i] = 0;
2893 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
/* default feature set, gated by the compile-time DEFAULT_* switches */
2895 dev->net->features = 0;
2897 if (DEFAULT_TX_CSUM_ENABLE)
2898 dev->net->features |= NETIF_F_HW_CSUM;
2900 if (DEFAULT_RX_CSUM_ENABLE)
2901 dev->net->features |= NETIF_F_RXCSUM;
2903 if (DEFAULT_TSO_CSUM_ENABLE)
2904 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2906 if (DEFAULT_VLAN_RX_OFFLOAD)
2907 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2909 if (DEFAULT_VLAN_FILTER_ENABLE)
2910 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2912 dev->net->hw_features = dev->net->features;
2914 ret = lan78xx_setup_irq_domain(dev);
2916 netdev_warn(dev->net,
2917 "lan78xx_setup_irq_domain() failed : %d", ret);
/* account for the 8-byte TX command header in MTU math */
2921 dev->net->hard_header_len += TX_OVERHEAD;
2922 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2924 /* Init all registers */
2925 ret = lan78xx_reset(dev);
2927 netdev_warn(dev->net, "Registers INIT FAILED....");
2931 ret = lan78xx_mdio_init(dev);
2933 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2937 dev->net->flags |= IFF_MULTICAST;
2939 pdata->wol = WAKE_MAGIC;
/* error unwind path */
2944 lan78xx_remove_irq_domain(dev);
2947 netdev_warn(dev->net, "Bind routine FAILED");
2948 cancel_work_sync(&pdata->set_multicast);
2949 cancel_work_sync(&pdata->set_vlan);
/* lan78xx_unbind - undo lan78xx_bind(): tear down the IRQ domain and MDIO
 * bus, cancel deferred multicast/VLAN work and free the private data.
 */
2954 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2956 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2958 lan78xx_remove_irq_domain(dev);
2960 lan78xx_remove_mdio(dev);
2963 cancel_work_sync(&pdata->set_multicast);
2964 cancel_work_sync(&pdata->set_vlan);
2965 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* lan78xx_rx_csum_offload - apply the hardware RX checksum result.
 * Falls back to software checksumming (CHECKSUM_NONE) when RXCSUM is off,
 * the HW flagged a checksum error, or a VLAN tag is present but not being
 * stripped (HW csum is unreliable in that case — see comment below).
 * Otherwise stores the HW-computed csum and marks CHECKSUM_COMPLETE.
 */
2972 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2973 struct sk_buff *skb,
2974 u32 rx_cmd_a, u32 rx_cmd_b)
2976 /* HW Checksum offload appears to be flawed if used when not stripping
2977 * VLAN headers. Drop back to S/W checksums under these conditions.
2979 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2980 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
2981 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
2982 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
2983 skb->ip_summed = CHECKSUM_NONE;
2985 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2986 skb->ip_summed = CHECKSUM_COMPLETE;
/* lan78xx_rx_vlan_offload - if VLAN RX offload is enabled and the HW
 * flagged/stripped a tag (RX_CMD_A_FVTG_), attach the tag carried in the
 * low 16 bits of rx_cmd_b to the skb.
 */
2990 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
2991 struct sk_buff *skb,
2992 u32 rx_cmd_a, u32 rx_cmd_b)
2994 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2995 (rx_cmd_a & RX_CMD_A_FVTG_))
2996 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2997 (rx_cmd_b & 0xffff));
/* lan78xx_skb_return - deliver a fully-parsed RX skb to the network stack.
 * While EVENT_RX_PAUSED is set the skb is parked on rxq_pause instead.
 * Updates RX counters, resolves the protocol, clears the driver's cb
 * state and hands off via netif_rx() (honouring RX timestamp deferral).
 */
3000 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3004 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3005 skb_queue_tail(&dev->rxq_pause, skb);
3009 dev->net->stats.rx_packets++;
3010 dev->net->stats.rx_bytes += skb->len;
3012 skb->protocol = eth_type_trans(skb, dev->net);
3014 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3015 skb->len + sizeof(struct ethhdr), skb->protocol);
/* cb held driver skb_data state; wipe before the stack reuses it */
3016 memset(skb->cb, 0, sizeof(struct skb_data));
3018 if (skb_defer_rx_timestamp(skb))
3021 status = netif_rx(skb);
3022 if (status != NET_RX_SUCCESS)
3023 netif_dbg(dev, rx_err, dev->net,
3024 "netif_rx status %d\n", status);
/* lan78xx_rx - parse one bulk-in URB buffer that may hold several frames.
 * Each frame is preceded by a 10-byte header: rx_cmd_a (le32),
 * rx_cmd_b (le32) and rx_cmd_c (le16). Frames are 4-byte aligned with
 * RXW_PADDING accounted for. The last frame reuses the URB skb directly;
 * earlier frames are cloned. Returns zero-equivalent on a short buffer
 * (exact return statements not visible in this extraction).
 */
3027 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3029 if (skb->len < dev->net->hard_header_len)
3032 while (skb->len > 0) {
3033 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3035 struct sk_buff *skb2;
3036 unsigned char *packet;
/* pull the three little-endian command words off the front */
3038 rx_cmd_a = get_unaligned_le32(skb->data);
3039 skb_pull(skb, sizeof(rx_cmd_a));
3041 rx_cmd_b = get_unaligned_le32(skb->data);
3042 skb_pull(skb, sizeof(rx_cmd_b));
3044 rx_cmd_c = get_unaligned_le16(skb->data);
3045 skb_pull(skb, sizeof(rx_cmd_c));
3049 /* get the packet length */
3050 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3051 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
/* RX_CMD_A_RED_ marks a receive error for this frame */
3053 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3054 netif_dbg(dev, rx_err, dev->net,
3055 "Error rx_cmd_a=0x%08x", rx_cmd_a)
3057 /* last frame in this batch */
3058 if (skb->len == size) {
3059 lan78xx_rx_csum_offload(dev, skb,
3060 rx_cmd_a, rx_cmd_b);
3061 lan78xx_rx_vlan_offload(dev, skb,
3062 rx_cmd_a, rx_cmd_b);
3064 skb_trim(skb, skb->len - 4); /* remove fcs */
3065 skb->truesize = size + sizeof(struct sk_buff);
/* not the last frame: clone and carve this frame out of the buffer */
3070 skb2 = skb_clone(skb, GFP_ATOMIC);
3071 if (unlikely(!skb2)) {
3072 netdev_warn(dev->net, "Error allocating skb");
3077 skb2->data = packet;
3078 skb_set_tail_pointer(skb2, size);
3080 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3081 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3083 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3084 skb2->truesize = size + sizeof(struct sk_buff);
3086 lan78xx_skb_return(dev, skb2);
3089 skb_pull(skb, size);
3091 /* padding bytes before the next frame starts */
3093 skb_pull(skb, align_count);
/* rx_process - bottom-half handling for one completed RX buffer.
 * Parses it with lan78xx_rx(); on parse failure or a short frame the
 * skb is counted as an rx_error and requeued on dev->done for cleanup,
 * otherwise the frame is delivered via lan78xx_skb_return().
 */
3099 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3101 if (!lan78xx_rx(dev, skb)) {
3102 dev->net->stats.rx_errors++;
3107 lan78xx_skb_return(dev, skb);
3111 netif_dbg(dev, rx_err, dev->net, "drop\n");
3112 dev->net->stats.rx_errors++;
3114 skb_queue_tail(&dev->done, skb);
3117 static void rx_complete(struct urb *urb);
/* rx_submit - allocate an rx_urb_size skb, attach it to the given URB and
 * submit it on the bulk-in pipe. Submission is skipped (and the skb
 * freed) when the device is gone, not running, halted or asleep.
 * -EPIPE defers halt clearing to the kevent worker; -ENODEV detaches
 * the netdev.
 */
3119 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3121 struct sk_buff *skb;
3122 struct skb_data *entry;
3123 unsigned long lockflags;
3124 size_t size = dev->rx_urb_size;
3127 skb = netdev_alloc_skb_ip_align(dev->net, size);
3133 entry = (struct skb_data *)skb->cb;
3138 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3139 skb->data, size, rx_complete, skb);
3141 spin_lock_irqsave(&dev->rxq.lock, lockflags);
/* only submit while the device is present, up, and not halted/asleep */
3143 if (netif_device_present(dev->net) &&
3144 netif_running(dev->net) &&
3145 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3146 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3147 ret = usb_submit_urb(urb, GFP_ATOMIC);
3150 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3153 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3156 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3157 netif_device_detach(dev->net);
3163 netif_dbg(dev, rx_err, dev->net,
3164 "rx submit, %d\n", ret);
3165 tasklet_schedule(&dev->bh);
3168 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3171 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
/* on any failure path the freshly allocated skb is released here */
3173 dev_kfree_skb_any(skb);
/* rx_complete - URB completion handler for bulk-in transfers.
 * Classifies the URB status (success, stall -> EVENT_RX_HALT, unlink/
 * shutdown, overrun, transient errors), moves the skb to dev->done via
 * defer_bh(), and resubmits the URB unless the device stopped or the
 * skb was being unlinked. Runs in interrupt context.
 */
3179 static void rx_complete(struct urb *urb)
3181 struct sk_buff *skb = (struct sk_buff *)urb->context;
3182 struct skb_data *entry = (struct skb_data *)skb->cb;
3183 struct lan78xx_net *dev = entry->dev;
3184 int urb_status = urb->status;
3185 enum skb_state state;
3187 skb_put(skb, urb->actual_length);
3191 switch (urb_status) {
/* success: sanity-check the minimum frame length */
3193 if (skb->len < dev->net->hard_header_len) {
3195 dev->net->stats.rx_errors++;
3196 dev->net->stats.rx_length_errors++;
3197 netif_dbg(dev, rx_err, dev->net,
3198 "rx length %d\n", skb->len);
3200 usb_mark_last_busy(dev->udev);
/* stall: defer halt clearing to the kevent worker */
3203 dev->net->stats.rx_errors++;
3204 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3206 case -ECONNRESET: /* async unlink */
3207 case -ESHUTDOWN: /* hardware gone */
3208 netif_dbg(dev, ifdown, dev->net,
3209 "rx shutdown, code %d\n", urb_status);
3217 dev->net->stats.rx_errors++;
3223 /* data overrun ... flush fifo? */
3225 dev->net->stats.rx_over_errors++;
3230 dev->net->stats.rx_errors++;
3231 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3235 state = defer_bh(dev, skb, &dev->rxq, state);
/* reuse the URB immediately unless stopped, halted or mid-unlink */
3238 if (netif_running(dev->net) &&
3239 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3240 state != unlink_start) {
3241 rx_submit(dev, urb, GFP_ATOMIC);
3246 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* lan78xx_tx_bh - bottom-half TX path: coalesce pending skbs and submit.
 * Walks txq_pend under its lock to decide how many packets fit into one
 * bulk-out transfer (GSO frames go alone; others are batched up to
 * MAX_SINGLE_PACKET_SIZE with 4-byte alignment between frames), copies
 * them into a single skb, and submits it as one URB. Handles autopm,
 * the device-asleep deferral path, ZLP insertion and queue flow control.
 */
3249 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3252 struct urb *urb = NULL;
3253 struct skb_data *entry;
3254 unsigned long flags;
3255 struct sk_buff_head *tqp = &dev->txq_pend;
3256 struct sk_buff *skb, *skb2;
3259 int skb_totallen, pkt_cnt;
/* pass 1: under the pend-queue lock, size the batch */
3265 spin_lock_irqsave(&tqp->lock, flags);
3266 skb_queue_walk(tqp, skb) {
3267 if (skb_is_gso(skb)) {
3268 if (!skb_queue_is_first(tqp, skb)) {
3269 /* handle previous packets first */
/* a GSO skb is sent on its own, bypassing the copy below */
3273 length = skb->len - TX_OVERHEAD;
3274 __skb_unlink(skb, tqp);
3275 spin_unlock_irqrestore(&tqp->lock, flags);
3279 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3281 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3284 spin_unlock_irqrestore(&tqp->lock, flags);
3286 /* copy to a single skb */
3287 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3291 skb_put(skb, skb_totallen);
/* pass 2: dequeue and pack each frame at a 4-byte-aligned offset */
3293 for (count = pos = 0; count < pkt_cnt; count++) {
3294 skb2 = skb_dequeue(tqp);
3296 length += (skb2->len - TX_OVERHEAD);
3297 memcpy(skb->data + pos, skb2->data, skb2->len);
3298 pos += roundup(skb2->len, sizeof(u32));
3299 dev_kfree_skb(skb2);
3304 urb = usb_alloc_urb(0, GFP_ATOMIC);
3308 entry = (struct skb_data *)skb->cb;
3311 entry->length = length;
3312 entry->num_of_packet = count;
3314 spin_lock_irqsave(&dev->txq.lock, flags);
/* async pm ref is dropped in tx_complete() or on the failure paths */
3315 ret = usb_autopm_get_interface_async(dev->intf);
3317 spin_unlock_irqrestore(&dev->txq.lock, flags);
3321 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3322 skb->data, skb->len, tx_complete, skb);
3324 if (length % dev->maxpacket == 0) {
3325 /* send USB_ZERO_PACKET */
3326 urb->transfer_flags |= URB_ZERO_PACKET;
3330 /* if this triggers the device is still a sleep */
3331 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3332 /* transmission will be done in resume */
3333 usb_anchor_urb(urb, &dev->deferred);
3334 /* no use to process more packets */
3335 netif_stop_queue(dev->net);
3337 spin_unlock_irqrestore(&dev->txq.lock, flags);
3338 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3343 ret = usb_submit_urb(urb, GFP_ATOMIC);
3346 netif_trans_update(dev->net);
3347 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3348 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3349 netif_stop_queue(dev->net);
/* -EPIPE: stall; defer halt clearing and drop the pm reference */
3352 netif_stop_queue(dev->net);
3353 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3354 usb_autopm_put_interface_async(dev->intf);
3357 usb_autopm_put_interface_async(dev->intf);
3358 netif_dbg(dev, tx_err, dev->net,
3359 "tx: submit urb err %d\n", ret);
3363 spin_unlock_irqrestore(&dev->txq.lock, flags);
3366 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3368 dev->net->stats.tx_dropped++;
3370 dev_kfree_skb_any(skb);
3373 netif_dbg(dev, tx_queued, dev->net,
3374 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* lan78xx_rx_bh - bottom-half RX refill: top up the RX URB queue (up to
 * 10 URBs per pass, bounded by rx_qlen), rescheduling the tasklet if the
 * queue is still short, and wake the TX queue when it has room again.
 */
3377 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3382 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3383 for (i = 0; i < 10; i++) {
3384 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3386 urb = usb_alloc_urb(0, GFP_ATOMIC);
/* -ENOLINK from rx_submit means the device is gone: stop refilling */
3388 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3392 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3393 tasklet_schedule(&dev->bh);
3395 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3396 netif_wake_queue(dev->net);
/* lan78xx_bh - main bottom-half tasklet.
 * Drains dev->done, dispatching each skb by its lifecycle state
 * (rx_done -> rx_process, cleanup states -> free URB), then — if the
 * device is present and running — resets the stats timer delta, kicks
 * the TX bottom half when work is pending and refills RX.
 */
3399 static void lan78xx_bh(unsigned long param)
3401 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3402 struct sk_buff *skb;
3403 struct skb_data *entry;
3405 while ((skb = skb_dequeue(&dev->done))) {
3406 entry = (struct skb_data *)(skb->cb);
3407 switch (entry->state) {
3409 entry->state = rx_cleanup;
3410 rx_process(dev, skb);
3413 usb_free_urb(entry->urb);
3417 usb_free_urb(entry->urb);
3421 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3426 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3427 /* reset update timer delta */
3428 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3430 mod_timer(&dev->stat_monitor,
3431 jiffies + STAT_UPDATE_TIMER);
3434 if (!skb_queue_empty(&dev->txq_pend))
3437 if (!timer_pending(&dev->delay) &&
3438 !test_bit(EVENT_RX_HALT, &dev->flags))
/* lan78xx_delayedwork - deferred kevent worker.
 * Services the event bits set via lan78xx_defer_kevent(): clears TX/RX
 * endpoint halts (unlinking outstanding URBs first), performs link
 * reset, and runs the periodic statistics update with exponential
 * back-off of the stats timer (delta capped at 50).
 */
3443 static void lan78xx_delayedwork(struct work_struct *work)
3446 struct lan78xx_net *dev;
3448 dev = container_of(work, struct lan78xx_net, wq.work);
3450 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3451 unlink_urbs(dev, &dev->txq);
3452 status = usb_autopm_get_interface(dev->intf);
3455 status = usb_clear_halt(dev->udev, dev->pipe_out);
3456 usb_autopm_put_interface(dev->intf);
3459 status != -ESHUTDOWN) {
3460 if (netif_msg_tx_err(dev))
3462 netdev_err(dev->net,
3463 "can't clear tx halt, status %d\n",
3466 clear_bit(EVENT_TX_HALT, &dev->flags);
3467 if (status != -ESHUTDOWN)
3468 netif_wake_queue(dev->net);
3471 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3472 unlink_urbs(dev, &dev->rxq);
3473 status = usb_autopm_get_interface(dev->intf);
3476 status = usb_clear_halt(dev->udev, dev->pipe_in);
3477 usb_autopm_put_interface(dev->intf);
3480 status != -ESHUTDOWN) {
3481 if (netif_msg_rx_err(dev))
3483 netdev_err(dev->net,
3484 "can't clear rx halt, status %d\n",
3487 clear_bit(EVENT_RX_HALT, &dev->flags);
3488 tasklet_schedule(&dev->bh);
3492 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3495 clear_bit(EVENT_LINK_RESET, &dev->flags);
3496 status = usb_autopm_get_interface(dev->intf);
3499 if (lan78xx_link_reset(dev) < 0) {
3500 usb_autopm_put_interface(dev->intf);
3502 netdev_info(dev->net, "link reset failed (%d)\n",
3505 usb_autopm_put_interface(dev->intf);
3509 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3510 lan78xx_update_stats(dev);
3512 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3514 mod_timer(&dev->stat_monitor,
3515 jiffies + (STAT_UPDATE_TIMER * dev->delta));
/* back off the stats period exponentially, capped at 50 intervals */
3517 dev->delta = min((dev->delta * 2), 50);
/* intr_complete - completion handler for the interrupt (status) URB.
 * On success processes the status words via lan78xx_status(); on
 * unlink/shutdown it just logs; other errors are logged without
 * throttling. Resubmits the URB while the netdev is running.
 */
3521 static void intr_complete(struct urb *urb)
3523 struct lan78xx_net *dev = urb->context;
3524 int status = urb->status;
3529 lan78xx_status(dev, urb);
3532 /* software-driven interface shutdown */
3533 case -ENOENT: /* urb killed */
3534 case -ESHUTDOWN: /* hardware gone */
3535 netif_dbg(dev, ifdown, dev->net,
3536 "intr shutdown, code %d\n", status);
3539 /* NOTE: not throttling like RX/TX, since this endpoint
3540 * already polls infrequently
3543 netdev_dbg(dev->net, "intr status %d\n", status);
3547 if (!netif_running(dev->net))
/* clear the buffer so stale status is never re-processed */
3550 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3551 status = usb_submit_urb(urb, GFP_ATOMIC);
3553 netif_err(dev, timer, dev->net,
3554 "intr resubmit --> %d\n", status);
/* lan78xx_disconnect - USB disconnect callback.
 * Unregisters PHY fixups, disconnects (and, for pseudo fixed-link,
 * unregisters) the PHY, unregisters the netdev, stops deferred work,
 * scuttles anchored deferred TX URBs, unbinds and releases the
 * interrupt URB.
 */
3557 static void lan78xx_disconnect(struct usb_interface *intf)
3559 struct lan78xx_net *dev;
3560 struct usb_device *udev;
3561 struct net_device *net;
3562 struct phy_device *phydev;
3564 dev = usb_get_intfdata(intf);
3565 usb_set_intfdata(intf, NULL);
3569 udev = interface_to_usbdev(intf);
3571 phydev = net->phydev;
/* fixups were registered at probe time for LAN7801 external PHYs */
3573 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3574 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3576 phy_disconnect(net->phydev);
3578 if (phy_is_pseudo_fixed_link(phydev))
3579 fixed_phy_unregister(phydev);
3581 unregister_netdev(net);
3583 cancel_delayed_work_sync(&dev->wq);
3585 usb_scuttle_anchored_urbs(&dev->deferred);
3587 lan78xx_unbind(dev, intf);
3589 usb_kill_urb(dev->urb_intr);
3590 usb_free_urb(dev->urb_intr);
/* lan78xx_tx_timeout - ndo_tx_timeout: unlink outstanding TX URBs and
 * reschedule the bottom half to restart transmission.
 */
3596 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
3598 struct lan78xx_net *dev = netdev_priv(net);
3600 unlink_urbs(dev, &dev->txq);
3601 tasklet_schedule(&dev->bh);
/* lan78xx_features_check - ndo_features_check: disable GSO for frames
 * that, with the TX command header added, would exceed the device's
 * single-packet limit, then apply the generic VLAN/VXLAN checks.
 */
3604 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3605 struct net_device *netdev,
3606 netdev_features_t features)
3608 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3609 features &= ~NETIF_F_GSO_MASK;
3611 features = vlan_features_check(skb, features);
3612 features = vxlan_features_check(skb, features);
/* Netdev operations table wiring the driver callbacks into the stack. */
3617 static const struct net_device_ops lan78xx_netdev_ops = {
3618 .ndo_open = lan78xx_open,
3619 .ndo_stop = lan78xx_stop,
3620 .ndo_start_xmit = lan78xx_start_xmit,
3621 .ndo_tx_timeout = lan78xx_tx_timeout,
3622 .ndo_change_mtu = lan78xx_change_mtu,
3623 .ndo_set_mac_address = lan78xx_set_mac_addr,
3624 .ndo_validate_addr = eth_validate_addr,
3625 .ndo_do_ioctl = phy_do_ioctl_running,
3626 .ndo_set_rx_mode = lan78xx_set_multicast,
3627 .ndo_set_features = lan78xx_set_features,
3628 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3629 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3630 .ndo_features_check = lan78xx_features_check,
/* lan78xx_stat_monitor - periodic stats timer: defer the actual register
 * reads (which may sleep) to the kevent worker via EVENT_STAT_UPDATE.
 */
3633 static void lan78xx_stat_monitor(struct timer_list *t)
3635 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3637 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* lan78xx_probe - USB probe callback.
 * Allocates the netdev and private state, initialises queues, tasklet,
 * deferred work and the stats timer, validates the three expected
 * endpoints (bulk-in, bulk-out, interrupt-in), binds the hardware,
 * fixes up MTU/GSO limits, prepares the interrupt URB, rejects broken
 * (zero-maxpacket) descriptors, initialises the PHY, registers the
 * netdev and enables wakeup with a 10 s autosuspend delay.
 * Error labels at the tail unwind in reverse order.
 */
3640 static int lan78xx_probe(struct usb_interface *intf,
3641 const struct usb_device_id *id)
3643 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3644 struct lan78xx_net *dev;
3645 struct net_device *netdev;
3646 struct usb_device *udev;
3652 udev = interface_to_usbdev(intf);
3653 udev = usb_get_dev(udev);
3655 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3657 dev_err(&intf->dev, "Error: OOM\n");
3662 /* netdev_printk() needs this */
3663 SET_NETDEV_DEV(netdev, &intf->dev);
3665 dev = netdev_priv(netdev);
3669 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3670 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3672 skb_queue_head_init(&dev->rxq);
3673 skb_queue_head_init(&dev->txq);
3674 skb_queue_head_init(&dev->done);
3675 skb_queue_head_init(&dev->rxq_pause);
3676 skb_queue_head_init(&dev->txq_pend);
3677 mutex_init(&dev->phy_mutex);
3679 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3680 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3681 init_usb_anchor(&dev->deferred);
3683 netdev->netdev_ops = &lan78xx_netdev_ops;
3684 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3685 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3688 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3690 mutex_init(&dev->stats.access_lock);
/* the device must expose bulk-in, bulk-out and interrupt-in endpoints */
3692 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3697 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3698 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3699 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3704 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3705 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3706 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3711 ep_intr = &intf->cur_altsetting->endpoint[2];
3712 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3717 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3718 usb_endpoint_num(&ep_intr->desc));
3720 ret = lan78xx_bind(dev, intf);
3724 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3725 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3727 /* MTU range: 68 - 9000 */
3728 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3729 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3731 period = ep_intr->desc.bInterval;
3732 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3733 buf = kmalloc(maxp, GFP_KERNEL);
3735 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3736 if (!dev->urb_intr) {
3741 usb_fill_int_urb(dev->urb_intr, dev->udev,
3742 dev->pipe_intr, buf, maxp,
3743 intr_complete, dev, period);
/* buf ownership passes to the URB; freed with it on usb_free_urb() */
3744 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3748 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3750 /* Reject broken descriptors. */
3751 if (dev->maxpacket == 0) {
3756 /* driver requires remote-wakeup capability during autosuspend. */
3757 intf->needs_remote_wakeup = 1;
3759 ret = lan78xx_phy_init(dev);
3763 ret = register_netdev(netdev);
3765 netif_err(dev, probe, netdev, "couldn't register the device\n");
3769 usb_set_intfdata(intf, dev);
3771 ret = device_set_wakeup_enable(&udev->dev, true);
3773 /* Default delay of 2sec has more overhead than advantage.
3774 * Set to 10sec as default.
3776 pm_runtime_set_autosuspend_delay(&udev->dev,
3777 DEFAULT_AUTOSUSPEND_DELAY);
/* error unwind path */
3782 phy_disconnect(netdev->phydev);
3784 usb_free_urb(dev->urb_intr);
3786 lan78xx_unbind(dev, intf);
3788 free_netdev(netdev);
/* lan78xx_wakeframe_crc16 - bitwise CRC-16 (polynomial 0x8005) over a
 * byte buffer, used to program wake-frame filters (WUF_CFG).
 * (Initialisation and shift steps of the inner loop are not visible in
 * this extraction.)
 */
3795 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3797 const u16 crc16poly = 0x8005;
3803 for (i = 0; i < len; i++) {
3805 for (bit = 0; bit < 8; bit++) {
3809 if (msb ^ (u16)(data & 1)) {
3811 crc |= (u16)0x0001U;
/* lan78xx_set_suspend - program Wake-on-LAN hardware state for suspend.
 * Disables the MAC TX/RX paths, clears wake status, then for each
 * requested WoL mode (PHY, magic packet, broadcast, multicast, unicast,
 * ARP) sets the matching WUCSR bits and wake-frame filters and selects
 * a PMT suspend mode, finally re-enabling the receiver so wake frames
 * can be seen.
 */
3820 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3828 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3829 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3830 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce the MAC before reprogramming wake logic */
3832 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3833 buf &= ~MAC_TX_TXEN_;
3834 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3835 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3836 buf &= ~MAC_RX_RXEN_;
3837 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3839 ret = lan78xx_write_reg(dev, WUCSR, 0);
3840 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3841 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3846 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3847 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3848 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* clear all wake-frame filter slots before configuring new ones */
3850 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3851 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3854 if (wol & WAKE_PHY) {
3855 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3857 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3858 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3859 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3861 if (wol & WAKE_MAGIC) {
3862 temp_wucsr |= WUCSR_MPEN_;
3864 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3865 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3866 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3868 if (wol & WAKE_BCAST) {
3869 temp_wucsr |= WUCSR_BCST_EN_;
3871 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3872 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3873 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3875 if (wol & WAKE_MCAST) {
3876 temp_wucsr |= WUCSR_WAKE_EN_;
3878 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3879 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3880 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3882 WUF_CFGX_TYPE_MCAST_ |
3883 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3884 (crc & WUF_CFGX_CRC16_MASK_));
3886 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3887 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3888 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3889 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3892 /* for IPv6 Multicast */
3893 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3894 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3896 WUF_CFGX_TYPE_MCAST_ |
3897 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3898 (crc & WUF_CFGX_CRC16_MASK_));
3900 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3901 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3902 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3903 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3906 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3907 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3908 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3910 if (wol & WAKE_UCAST) {
3911 temp_wucsr |= WUCSR_PFDA_EN_;
3913 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3914 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3915 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3917 if (wol & WAKE_ARP) {
3918 temp_wucsr |= WUCSR_WAKE_EN_;
3920 /* set WUF_CFG & WUF_MASK
3921 * for packettype (offset 12,13) = ARP (0x0806)
3923 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3924 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3926 WUF_CFGX_TYPE_ALL_ |
3927 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3928 (crc & WUF_CFGX_CRC16_MASK_));
3930 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3931 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3932 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3933 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3936 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3937 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3938 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3941 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3943 /* when multiple WOL bits are set */
3944 if (hweight_long((unsigned long)wol) > 1) {
3945 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3946 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3947 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3949 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear any latched wake status, then re-enable the receiver */
3952 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3953 buf |= PMT_CTL_WUPS_MASK_;
3954 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3956 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3957 buf |= MAC_RX_RXEN_;
3958 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* lan78xx_suspend - USB suspend callback (system sleep and autosuspend).
 * On the first suspend level: refuses autosuspend while TX is pending,
 * marks the device asleep, disables MAC TX/RX, detaches the netdev and
 * drains URBs. If the device was up (EVENT_DEV_ASLEEP), autosuspend arms
 * good-frame/PHY wake in SUS_MODE_3, while system sleep programs the
 * user's WoL configuration via lan78xx_set_suspend().
 */
3963 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3965 struct lan78xx_net *dev = usb_get_intfdata(intf);
3966 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3970 if (!dev->suspend_count++) {
3971 spin_lock_irq(&dev->txq.lock);
3972 /* don't autosuspend while transmitting */
3973 if ((skb_queue_len(&dev->txq) ||
3974 skb_queue_len(&dev->txq_pend)) &&
3975 PMSG_IS_AUTO(message)) {
3976 spin_unlock_irq(&dev->txq.lock);
3980 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3981 spin_unlock_irq(&dev->txq.lock);
/* stop the MAC before tearing down the URB machinery */
3985 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3986 buf &= ~MAC_TX_TXEN_;
3987 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3988 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3989 buf &= ~MAC_RX_RXEN_;
3990 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3992 /* empty out the rx and queues */
3993 netif_device_detach(dev->net);
3994 lan78xx_terminate_urbs(dev);
3995 usb_kill_urb(dev->urb_intr);
3998 netif_device_attach(dev->net);
4001 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4002 del_timer(&dev->stat_monitor);
4004 if (PMSG_IS_AUTO(message)) {
4005 /* auto suspend (selective suspend) */
4006 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4007 buf &= ~MAC_TX_TXEN_;
4008 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4009 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4010 buf &= ~MAC_RX_RXEN_;
4011 ret = lan78xx_write_reg(dev, MAC_RX, buf);
4013 ret = lan78xx_write_reg(dev, WUCSR, 0);
4014 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4015 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4017 /* set goodframe wakeup */
4018 ret = lan78xx_read_reg(dev, WUCSR, &buf);
4020 buf |= WUCSR_RFE_WAKE_EN_;
4021 buf |= WUCSR_STORE_WAKE_;
4023 ret = lan78xx_write_reg(dev, WUCSR, buf);
4025 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4027 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4028 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4030 buf |= PMT_CTL_PHY_WAKE_EN_;
4031 buf |= PMT_CTL_WOL_EN_;
4032 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4033 buf |= PMT_CTL_SUS_MODE_3_;
4035 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4037 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4039 buf |= PMT_CTL_WUPS_MASK_;
4041 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* re-enable RX so wake frames can reach the wake logic */
4043 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
4044 buf |= MAC_RX_RXEN_;
4045 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* system sleep: honour the user-configured WoL settings */
4047 lan78xx_set_suspend(dev, pdata->wol);
/* lan78xx_resume - USB resume callback.
 * Restarts the stats timer, and when the last suspend level is dropped:
 * resubmits the interrupt URB, replays TX URBs anchored on dev->deferred
 * during suspend, clears EVENT_DEV_ASLEEP, restarts the queue/tasklet,
 * clears wake status/sources, re-arms the WUCSR/WUCSR2 wake-event
 * indicators and re-enables the MAC transmitter.
 */
4056 static int lan78xx_resume(struct usb_interface *intf)
4058 struct lan78xx_net *dev = usb_get_intfdata(intf);
4059 struct sk_buff *skb;
4064 if (!timer_pending(&dev->stat_monitor)) {
4066 mod_timer(&dev->stat_monitor,
4067 jiffies + STAT_UPDATE_TIMER);
4070 if (!--dev->suspend_count) {
4071 /* resume interrupt URBs */
4072 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
4073 usb_submit_urb(dev->urb_intr, GFP_NOIO);
4075 spin_lock_irq(&dev->txq.lock);
/* resubmit TX URBs deferred while the device was asleep */
4076 while ((res = usb_get_from_anchor(&dev->deferred))) {
4077 skb = (struct sk_buff *)res->context;
4078 ret = usb_submit_urb(res, GFP_ATOMIC);
4080 dev_kfree_skb_any(skb);
4082 usb_autopm_put_interface_async(dev->intf);
4084 netif_trans_update(dev->net);
4085 lan78xx_queue_skb(&dev->txq, skb, tx_start);
4089 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4090 spin_unlock_irq(&dev->txq.lock);
4092 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4093 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
4094 netif_start_queue(dev->net);
4095 tasklet_schedule(&dev->bh);
4099 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4100 ret = lan78xx_write_reg(dev, WUCSR, 0);
4101 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
/* clear latched wake-event indicator bits */
4103 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4105 WUCSR2_IPV6_TCPSYN_RCD_ |
4106 WUCSR2_IPV4_TCPSYN_RCD_);
4108 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4109 WUCSR_EEE_RX_WAKE_ |
4111 WUCSR_RFE_WAKE_FR_ |
4116 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4117 buf |= MAC_TX_TXEN_;
4118 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* lan78xx_reset_resume - resume after a bus reset: restart the PHY, then
 * run the normal resume path.
 */
4123 static int lan78xx_reset_resume(struct usb_interface *intf)
4125 struct lan78xx_net *dev = usb_get_intfdata(intf);
4129 phy_start(dev->net->phydev);
4131 return lan78xx_resume(intf);
/* USB vendor/product ID match table for the supported LAN78xx devices. */
4134 static const struct usb_device_id products[] = {
4136 /* LAN7800 USB Gigabit Ethernet Device */
4137 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4140 /* LAN7850 USB Gigabit Ethernet Device */
4141 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4144 /* LAN7801 USB Gigabit Ethernet Device */
4145 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4148 /* ATM2-AF USB Gigabit Ethernet Device */
4149 USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
4153 MODULE_DEVICE_TABLE(usb, products);
/* USB driver registration: supports autosuspend and disables
 * hub-initiated LPM.
 */
4155 static struct usb_driver lan78xx_driver = {
4156 .name = DRIVER_NAME,
4157 .id_table = products,
4158 .probe = lan78xx_probe,
4159 .disconnect = lan78xx_disconnect,
4160 .suspend = lan78xx_suspend,
4161 .resume = lan78xx_resume,
4162 .reset_resume = lan78xx_reset_resume,
4163 .supports_autosuspend = 1,
4164 .disable_hub_initiated_lpm = 1,
4167 module_usb_driver(lan78xx_driver);
/* Module metadata. */
4169 MODULE_AUTHOR(DRIVER_AUTHOR);
4170 MODULE_DESCRIPTION(DRIVER_DESC);
4171 MODULE_LICENSE("GPL");