1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2015 Microchip Technology
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/vxlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/irq.h>
27 #include <linux/irqchip/chained_irq.h>
28 #include <linux/microchipphy.h>
29 #include <linux/phy_fixed.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
/* Driver identity and tuning constants.
 * NOTE(review): this region carries fused original-line-number prefixes from a
 * mangled extraction, and some intervening lines were lost — restore from the
 * upstream driver before compiling.
 */
34 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
35 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
36 #define DRIVER_NAME "lan78xx"
38 #define TX_TIMEOUT_JIFFIES (5 * HZ)
39 #define THROTTLE_JIFFIES (HZ / 8)
40 #define UNLINK_TIMEOUT_MS 3
42 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
/* USB bulk packet sizes per bus speed (SuperSpeed/High/Full). */
44 #define SS_USB_PKT_SIZE (1024)
45 #define HS_USB_PKT_SIZE (512)
46 #define FS_USB_PKT_SIZE (64)
48 #define MAX_RX_FIFO_SIZE (12 * 1024)
49 #define MAX_TX_FIFO_SIZE (12 * 1024)
50 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
51 #define DEFAULT_BULK_IN_DELAY (0x0800)
52 #define MAX_SINGLE_PACKET_SIZE (9000)
53 #define DEFAULT_TX_CSUM_ENABLE (true)
54 #define DEFAULT_RX_CSUM_ENABLE (true)
55 #define DEFAULT_TSO_CSUM_ENABLE (true)
56 #define DEFAULT_VLAN_FILTER_ENABLE (true)
57 #define DEFAULT_VLAN_RX_OFFLOAD (true)
58 #define TX_OVERHEAD (8)
/* USB IDs matched by the device table. */
61 #define LAN78XX_USB_VENDOR_ID (0x0424)
62 #define LAN7800_USB_PRODUCT_ID (0x7800)
63 #define LAN7850_USB_PRODUCT_ID (0x7850)
64 #define LAN7801_USB_PRODUCT_ID (0x7801)
65 #define LAN78XX_EEPROM_MAGIC (0x78A5)
66 #define LAN78XX_OTP_MAGIC (0x78F3)
67 #define AT29M2AF_USB_VENDOR_ID (0x07C9)
68 #define AT29M2AF_USB_PRODUCT_ID (0x0012)
/* EEPROM/OTP signature bytes read at offset 0 to validate content. */
73 #define EEPROM_INDICATOR (0xA5)
74 #define EEPROM_MAC_OFFSET (0x01)
75 #define MAX_EEPROM_SIZE 512
76 #define OTP_INDICATOR_1 (0xF3)
77 #define OTP_INDICATOR_2 (0xF7)
79 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
80 WAKE_MCAST | WAKE_BCAST | \
81 WAKE_ARP | WAKE_MAGIC)
83 /* USB related defines */
84 #define BULK_IN_PIPE 1
85 #define BULK_OUT_PIPE 2
87 /* default autosuspend delay (mSec)*/
88 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
90 /* statistic update interval (mSec) */
91 #define STAT_UPDATE_TIMER (1 * 1000)
93 /* time to wait for MAC or FCT to stop (jiffies) */
94 #define HW_DISABLE_TIMEOUT (HZ / 10)
96 /* time to wait between polling MAC or FCT state (ms) */
97 #define HW_DISABLE_DELAY_MS 1
99 /* defines interrupts from interrupt EP */
100 #define MAX_INT_EP (32)
101 #define INT_EP_INTEP (31)
102 #define INT_EP_OTP_WR_DONE (28)
103 #define INT_EP_EEE_TX_LPI_START (26)
104 #define INT_EP_EEE_TX_LPI_STOP (25)
105 #define INT_EP_EEE_RX_LPI (24)
106 #define INT_EP_MAC_RESET_TIMEOUT (23)
107 #define INT_EP_RDFO (22)
108 #define INT_EP_TXE (21)
109 #define INT_EP_USB_STATUS (20)
110 #define INT_EP_TX_DIS (19)
111 #define INT_EP_RX_DIS (18)
112 #define INT_EP_PHY (17)
113 #define INT_EP_DP (16)
114 #define INT_EP_MAC_ERR (15)
115 #define INT_EP_TDFU (14)
116 #define INT_EP_TDFO (13)
117 #define INT_EP_UTX (12)
118 #define INT_EP_GPIO_11 (11)
119 #define INT_EP_GPIO_10 (10)
120 #define INT_EP_GPIO_9 (9)
121 #define INT_EP_GPIO_8 (8)
122 #define INT_EP_GPIO_7 (7)
123 #define INT_EP_GPIO_6 (6)
124 #define INT_EP_GPIO_5 (5)
125 #define INT_EP_GPIO_4 (4)
126 #define INT_EP_GPIO_3 (3)
127 #define INT_EP_GPIO_2 (2)
128 #define INT_EP_GPIO_1 (1)
129 #define INT_EP_GPIO_0 (0)
131 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
133 "RX Alignment Errors",
134 "Rx Fragment Errors",
136 "RX Undersize Frame Errors",
137 "RX Oversize Frame Errors",
139 "RX Unicast Byte Count",
140 "RX Broadcast Byte Count",
141 "RX Multicast Byte Count",
143 "RX Broadcast Frames",
144 "RX Multicast Frames",
147 "RX 65 - 127 Byte Frames",
148 "RX 128 - 255 Byte Frames",
149 "RX 256 - 511 Bytes Frames",
150 "RX 512 - 1023 Byte Frames",
151 "RX 1024 - 1518 Byte Frames",
152 "RX Greater 1518 Byte Frames",
153 "EEE RX LPI Transitions",
156 "TX Excess Deferral Errors",
159 "TX Single Collisions",
160 "TX Multiple Collisions",
161 "TX Excessive Collision",
162 "TX Late Collisions",
163 "TX Unicast Byte Count",
164 "TX Broadcast Byte Count",
165 "TX Multicast Byte Count",
167 "TX Broadcast Frames",
168 "TX Multicast Frames",
171 "TX 65 - 127 Byte Frames",
172 "TX 128 - 255 Byte Frames",
173 "TX 256 - 511 Bytes Frames",
174 "TX 512 - 1023 Byte Frames",
175 "TX 1024 - 1518 Byte Frames",
176 "TX Greater 1518 Byte Frames",
177 "EEE TX LPI Transitions",
181 struct lan78xx_statstage {
183 u32 rx_alignment_errors;
184 u32 rx_fragment_errors;
185 u32 rx_jabber_errors;
186 u32 rx_undersize_frame_errors;
187 u32 rx_oversize_frame_errors;
188 u32 rx_dropped_frames;
189 u32 rx_unicast_byte_count;
190 u32 rx_broadcast_byte_count;
191 u32 rx_multicast_byte_count;
192 u32 rx_unicast_frames;
193 u32 rx_broadcast_frames;
194 u32 rx_multicast_frames;
196 u32 rx_64_byte_frames;
197 u32 rx_65_127_byte_frames;
198 u32 rx_128_255_byte_frames;
199 u32 rx_256_511_bytes_frames;
200 u32 rx_512_1023_byte_frames;
201 u32 rx_1024_1518_byte_frames;
202 u32 rx_greater_1518_byte_frames;
203 u32 eee_rx_lpi_transitions;
206 u32 tx_excess_deferral_errors;
207 u32 tx_carrier_errors;
208 u32 tx_bad_byte_count;
209 u32 tx_single_collisions;
210 u32 tx_multiple_collisions;
211 u32 tx_excessive_collision;
212 u32 tx_late_collisions;
213 u32 tx_unicast_byte_count;
214 u32 tx_broadcast_byte_count;
215 u32 tx_multicast_byte_count;
216 u32 tx_unicast_frames;
217 u32 tx_broadcast_frames;
218 u32 tx_multicast_frames;
220 u32 tx_64_byte_frames;
221 u32 tx_65_127_byte_frames;
222 u32 tx_128_255_byte_frames;
223 u32 tx_256_511_bytes_frames;
224 u32 tx_512_1023_byte_frames;
225 u32 tx_1024_1518_byte_frames;
226 u32 tx_greater_1518_byte_frames;
227 u32 eee_tx_lpi_transitions;
231 struct lan78xx_statstage64 {
233 u64 rx_alignment_errors;
234 u64 rx_fragment_errors;
235 u64 rx_jabber_errors;
236 u64 rx_undersize_frame_errors;
237 u64 rx_oversize_frame_errors;
238 u64 rx_dropped_frames;
239 u64 rx_unicast_byte_count;
240 u64 rx_broadcast_byte_count;
241 u64 rx_multicast_byte_count;
242 u64 rx_unicast_frames;
243 u64 rx_broadcast_frames;
244 u64 rx_multicast_frames;
246 u64 rx_64_byte_frames;
247 u64 rx_65_127_byte_frames;
248 u64 rx_128_255_byte_frames;
249 u64 rx_256_511_bytes_frames;
250 u64 rx_512_1023_byte_frames;
251 u64 rx_1024_1518_byte_frames;
252 u64 rx_greater_1518_byte_frames;
253 u64 eee_rx_lpi_transitions;
256 u64 tx_excess_deferral_errors;
257 u64 tx_carrier_errors;
258 u64 tx_bad_byte_count;
259 u64 tx_single_collisions;
260 u64 tx_multiple_collisions;
261 u64 tx_excessive_collision;
262 u64 tx_late_collisions;
263 u64 tx_unicast_byte_count;
264 u64 tx_broadcast_byte_count;
265 u64 tx_multicast_byte_count;
266 u64 tx_unicast_frames;
267 u64 tx_broadcast_frames;
268 u64 tx_multicast_frames;
270 u64 tx_64_byte_frames;
271 u64 tx_65_127_byte_frames;
272 u64 tx_128_255_byte_frames;
273 u64 tx_256_511_bytes_frames;
274 u64 tx_512_1023_byte_frames;
275 u64 tx_1024_1518_byte_frames;
276 u64 tx_greater_1518_byte_frames;
277 u64 eee_tx_lpi_transitions;
/* Register addresses snapshotted for ethtool get_regs.
 * NOTE(review): the array body (original lines 282-302) was lost in
 * extraction and cannot be reconstructed from this chunk — restore the
 * register list from the upstream driver.
 */
281 static u32 lan78xx_regs[] = {
/* 32 PHY registers, each read as a u32, appended after the MAC registers. */
303 #define PHY_REG_SIZE (32 * sizeof(u32))
/* Per-device private state hung off lan78xx_net->data[0].
 * NOTE(review): several members (wol, rfe_ctl, ...) and the closing brace
 * were lost in extraction; restore from the upstream driver.
 */
307 struct lan78xx_priv {
308 struct lan78xx_net *dev;
310 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
311 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
312 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
313 struct mutex dataport_mutex; /* for dataport access */
314 spinlock_t rfe_ctl_lock; /* for rfe register access */
315 struct work_struct set_multicast;
316 struct work_struct set_vlan;
/* Per-URB bookkeeping stored in skb->cb.
 * NOTE(review): members between these lines (entry/urb/length) were elided.
 */
330 struct skb_data { /* skb->cb is one of these */
332 struct lan78xx_net *dev;
333 enum skb_state state;
/* Deferred-control-URB context; the struct header line was elided. */
339 struct usb_ctrlrequest req;
340 struct lan78xx_net *dev;
/* Bit numbers in lan78xx_net->flags, consumed by the deferred kevent work
 * (see lan78xx_defer_kevent()).
 */
343 #define EVENT_TX_HALT 0
344 #define EVENT_RX_HALT 1
345 #define EVENT_RX_MEMORY 2
346 #define EVENT_STS_SPLIT 3
347 #define EVENT_LINK_RESET 4
348 #define EVENT_RX_PAUSED 5
349 #define EVENT_DEV_WAKING 6
350 #define EVENT_DEV_ASLEEP 7
351 #define EVENT_DEV_OPEN 8
352 #define EVENT_STAT_UPDATE 9
/* Interior of struct statstage (header line elided in extraction):
 * rollover-compensated statistics state guarded by access_lock.
 */
355 struct mutex access_lock; /* for stats access */
356 struct lan78xx_statstage saved;
357 struct lan78xx_statstage rollover_count;
358 struct lan78xx_statstage rollover_max;
359 struct lan78xx_statstage64 curr_stat;
/* PHY interrupt demultiplexing state.
 * NOTE(review): members between these lines (irq_base, phyirq - the latter is
 * referenced by lan78xx_status()) were elided in extraction.
 */
362 struct irq_domain_data {
363 struct irq_domain *irqdomain;
365 struct irq_chip *irqchip;
366 irq_flow_handler_t irq_handler;
368 struct mutex irq_lock; /* for irq bus access */
/* Interior of struct lan78xx_net (header line elided): the main per-device
 * state. Many members are missing from this extraction (flags, chipid,
 * fc_autoneg, link_on, delta, ...) - restore from the upstream driver.
 */
372 struct net_device *net;
373 struct usb_device *udev;
374 struct usb_interface *intf;
379 struct sk_buff_head rxq;
380 struct sk_buff_head txq;
381 struct sk_buff_head done;
382 struct sk_buff_head rxq_pause;
383 struct sk_buff_head txq_pend;
385 struct tasklet_struct bh;
386 struct delayed_work wq;
390 struct urb *urb_intr;
391 struct usb_anchor deferred;
393 struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
394 struct mutex phy_mutex; /* for phy access */
395 unsigned int pipe_in, pipe_out, pipe_intr;
397 u32 hard_mtu; /* count any extra framing */
398 size_t rx_urb_size; /* size for rx urbs */
402 wait_queue_head_t *wait;
403 unsigned char suspend_count;
405 unsigned int maxpacket;
406 struct timer_list delay;
407 struct timer_list stat_monitor;
409 unsigned long data[5];
416 struct mii_bus *mdiobus;
417 phy_interface_t interface;
420 u8 fc_request_control;
423 struct statstage stats;
425 struct irq_domain_data domain_data;
428 /* define external phy id */
429 #define PHY_LAN8835 (0x0007C130)
430 #define PHY_KSZ9031RNX (0x00221620)
432 /* use ethtool to change the level for any given device */
433 static int msg_level = -1;
434 module_param(msg_level, int, 0);
435 MODULE_PARM_DESC(msg_level, "Override default message level");
437 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
439 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
445 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
446 USB_VENDOR_REQUEST_READ_REGISTER,
447 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
448 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
449 if (likely(ret >= 0)) {
453 netdev_warn(dev->net,
454 "Failed to read register index 0x%08x. ret = %d",
463 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
465 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
474 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
475 USB_VENDOR_REQUEST_WRITE_REGISTER,
476 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
477 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
478 if (unlikely(ret < 0)) {
479 netdev_warn(dev->net,
480 "Failed to write register index 0x%08x. ret = %d",
489 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
495 ret = lan78xx_read_reg(dev, reg, &buf);
500 buf |= (mask & data);
502 ret = lan78xx_write_reg(dev, reg, buf);
509 static int lan78xx_read_stats(struct lan78xx_net *dev,
510 struct lan78xx_statstage *data)
514 struct lan78xx_statstage *stats;
518 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
522 ret = usb_control_msg(dev->udev,
523 usb_rcvctrlpipe(dev->udev, 0),
524 USB_VENDOR_REQUEST_GET_STATS,
525 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
530 USB_CTRL_SET_TIMEOUT);
531 if (likely(ret >= 0)) {
534 for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
535 le32_to_cpus(&src[i]);
539 netdev_warn(dev->net,
540 "Failed to read stat ret = %d", ret);
/* If the fresh hardware counter is below the last saved snapshot the 32-bit
 * register wrapped; bump the per-counter rollover count used by
 * lan78xx_update_stats() to extend counters to 64 bits.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
554 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
555 struct lan78xx_statstage *stats)
557 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
558 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
559 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
560 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
561 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
562 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
563 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
564 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
565 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
566 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
567 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
568 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
569 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
570 check_counter_rollover(stats, dev->stats, rx_pause_frames);
571 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
572 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
573 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
574 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
575 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
576 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
577 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
578 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
579 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
580 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
581 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
582 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
583 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
584 check_counter_rollover(stats, dev->stats, tx_single_collisions);
585 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
586 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
587 check_counter_rollover(stats, dev->stats, tx_late_collisions);
588 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
589 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
590 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
591 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
592 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
593 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
594 check_counter_rollover(stats, dev->stats, tx_pause_frames);
595 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
596 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
597 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
598 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
599 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
600 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
601 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
602 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
603 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
605 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
608 static void lan78xx_update_stats(struct lan78xx_net *dev)
610 u32 *p, *count, *max;
613 struct lan78xx_statstage lan78xx_stats;
615 if (usb_autopm_get_interface(dev->intf) < 0)
618 p = (u32 *)&lan78xx_stats;
619 count = (u32 *)&dev->stats.rollover_count;
620 max = (u32 *)&dev->stats.rollover_max;
621 data = (u64 *)&dev->stats.curr_stat;
623 mutex_lock(&dev->stats.access_lock);
625 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
626 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
628 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
629 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
631 mutex_unlock(&dev->stats.access_lock);
633 usb_autopm_put_interface(dev->intf);
636 /* Loop until the read is completed with timeout called with phy_mutex held */
637 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
639 unsigned long start_time = jiffies;
644 ret = lan78xx_read_reg(dev, MII_ACC, &val);
645 if (unlikely(ret < 0))
648 if (!(val & MII_ACC_MII_BUSY_))
650 } while (!time_after(jiffies, start_time + HZ));
655 static inline u32 mii_access(int id, int index, int read)
659 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
660 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
662 ret |= MII_ACC_MII_READ_;
664 ret |= MII_ACC_MII_WRITE_;
665 ret |= MII_ACC_MII_BUSY_;
670 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
672 unsigned long start_time = jiffies;
677 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
678 if (unlikely(ret < 0))
681 if (!(val & E2P_CMD_EPC_BUSY_) ||
682 (val & E2P_CMD_EPC_TIMEOUT_))
684 usleep_range(40, 100);
685 } while (!time_after(jiffies, start_time + HZ));
687 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
688 netdev_warn(dev->net, "EEPROM read operation timeout");
695 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
697 unsigned long start_time = jiffies;
702 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
703 if (unlikely(ret < 0))
706 if (!(val & E2P_CMD_EPC_BUSY_))
709 usleep_range(40, 100);
710 } while (!time_after(jiffies, start_time + HZ));
712 netdev_warn(dev->net, "EEPROM is busy");
716 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
717 u32 length, u8 *data)
724 /* depends on chip, some EEPROM pins are muxed with LED function.
725 * disable & restore LED function to access EEPROM.
727 ret = lan78xx_read_reg(dev, HW_CFG, &val);
729 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
730 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
731 ret = lan78xx_write_reg(dev, HW_CFG, val);
734 retval = lan78xx_eeprom_confirm_not_busy(dev);
738 for (i = 0; i < length; i++) {
739 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
740 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
741 ret = lan78xx_write_reg(dev, E2P_CMD, val);
742 if (unlikely(ret < 0)) {
747 retval = lan78xx_wait_eeprom(dev);
751 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
752 if (unlikely(ret < 0)) {
757 data[i] = val & 0xFF;
763 if (dev->chipid == ID_REV_CHIP_ID_7800_)
764 ret = lan78xx_write_reg(dev, HW_CFG, saved);
769 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
770 u32 length, u8 *data)
775 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
776 if ((ret == 0) && (sig == EEPROM_INDICATOR))
777 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
784 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
785 u32 length, u8 *data)
792 /* depends on chip, some EEPROM pins are muxed with LED function.
793 * disable & restore LED function to access EEPROM.
795 ret = lan78xx_read_reg(dev, HW_CFG, &val);
797 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
798 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
799 ret = lan78xx_write_reg(dev, HW_CFG, val);
802 retval = lan78xx_eeprom_confirm_not_busy(dev);
806 /* Issue write/erase enable command */
807 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
808 ret = lan78xx_write_reg(dev, E2P_CMD, val);
809 if (unlikely(ret < 0)) {
814 retval = lan78xx_wait_eeprom(dev);
818 for (i = 0; i < length; i++) {
819 /* Fill data register */
821 ret = lan78xx_write_reg(dev, E2P_DATA, val);
827 /* Send "write" command */
828 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
829 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
830 ret = lan78xx_write_reg(dev, E2P_CMD, val);
836 retval = lan78xx_wait_eeprom(dev);
845 if (dev->chipid == ID_REV_CHIP_ID_7800_)
846 ret = lan78xx_write_reg(dev, HW_CFG, saved);
851 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
852 u32 length, u8 *data)
856 unsigned long timeout;
858 lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
860 if (buf & OTP_PWR_DN_PWRDN_N_) {
861 /* clear it and wait to be cleared */
862 lan78xx_write_reg(dev, OTP_PWR_DN, 0);
864 timeout = jiffies + HZ;
867 lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
868 if (time_after(jiffies, timeout)) {
869 netdev_warn(dev->net,
870 "timeout on OTP_PWR_DN");
873 } while (buf & OTP_PWR_DN_PWRDN_N_);
876 for (i = 0; i < length; i++) {
877 lan78xx_write_reg(dev, OTP_ADDR1,
878 ((offset + i) >> 8) & OTP_ADDR1_15_11);
879 lan78xx_write_reg(dev, OTP_ADDR2,
880 ((offset + i) & OTP_ADDR2_10_3));
882 lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
883 lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
885 timeout = jiffies + HZ;
888 lan78xx_read_reg(dev, OTP_STATUS, &buf);
889 if (time_after(jiffies, timeout)) {
890 netdev_warn(dev->net,
891 "timeout on OTP_STATUS");
894 } while (buf & OTP_STATUS_BUSY_);
896 lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
898 data[i] = (u8)(buf & 0xFF);
904 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
905 u32 length, u8 *data)
909 unsigned long timeout;
911 lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
913 if (buf & OTP_PWR_DN_PWRDN_N_) {
914 /* clear it and wait to be cleared */
915 lan78xx_write_reg(dev, OTP_PWR_DN, 0);
917 timeout = jiffies + HZ;
920 lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
921 if (time_after(jiffies, timeout)) {
922 netdev_warn(dev->net,
923 "timeout on OTP_PWR_DN completion");
926 } while (buf & OTP_PWR_DN_PWRDN_N_);
929 /* set to BYTE program mode */
930 lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
932 for (i = 0; i < length; i++) {
933 lan78xx_write_reg(dev, OTP_ADDR1,
934 ((offset + i) >> 8) & OTP_ADDR1_15_11);
935 lan78xx_write_reg(dev, OTP_ADDR2,
936 ((offset + i) & OTP_ADDR2_10_3));
937 lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
938 lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
939 lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
941 timeout = jiffies + HZ;
944 lan78xx_read_reg(dev, OTP_STATUS, &buf);
945 if (time_after(jiffies, timeout)) {
946 netdev_warn(dev->net,
947 "Timeout on OTP_STATUS completion");
950 } while (buf & OTP_STATUS_BUSY_);
956 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
957 u32 length, u8 *data)
962 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
965 if (sig == OTP_INDICATOR_2)
967 else if (sig != OTP_INDICATOR_1)
970 ret = lan78xx_read_raw_otp(dev, offset, length, data);
976 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
980 for (i = 0; i < 100; i++) {
983 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
984 if (unlikely(ret < 0))
987 if (dp_sel & DP_SEL_DPRDY_)
990 usleep_range(40, 100);
993 netdev_warn(dev->net, "%s timed out", __func__);
998 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
999 u32 addr, u32 length, u32 *buf)
1001 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1005 if (usb_autopm_get_interface(dev->intf) < 0)
1008 mutex_lock(&pdata->dataport_mutex);
1010 ret = lan78xx_dataport_wait_not_busy(dev);
1014 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1016 dp_sel &= ~DP_SEL_RSEL_MASK_;
1017 dp_sel |= ram_select;
1018 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
1020 for (i = 0; i < length; i++) {
1021 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
1023 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1025 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1027 ret = lan78xx_dataport_wait_not_busy(dev);
1033 mutex_unlock(&pdata->dataport_mutex);
1034 usb_autopm_put_interface(dev->intf);
1039 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1040 int index, u8 addr[ETH_ALEN])
1044 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1046 temp = addr[2] | (temp << 8);
1047 temp = addr[1] | (temp << 8);
1048 temp = addr[0] | (temp << 8);
1049 pdata->pfilter_table[index][1] = temp;
1051 temp = addr[4] | (temp << 8);
1052 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1053 pdata->pfilter_table[index][0] = temp;
1057 /* returns hash bit number for given MAC address */
1058 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1060 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1063 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1065 struct lan78xx_priv *pdata =
1066 container_of(param, struct lan78xx_priv, set_multicast);
1067 struct lan78xx_net *dev = pdata->dev;
1070 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1073 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1074 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1076 for (i = 1; i < NUM_OF_MAF; i++) {
1077 lan78xx_write_reg(dev, MAF_HI(i), 0);
1078 lan78xx_write_reg(dev, MAF_LO(i),
1079 pdata->pfilter_table[i][1]);
1080 lan78xx_write_reg(dev, MAF_HI(i),
1081 pdata->pfilter_table[i][0]);
1084 lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1087 static void lan78xx_set_multicast(struct net_device *netdev)
1089 struct lan78xx_net *dev = netdev_priv(netdev);
1090 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1091 unsigned long flags;
1094 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1096 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1097 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1099 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1100 pdata->mchash_table[i] = 0;
1102 /* pfilter_table[0] has own HW address */
1103 for (i = 1; i < NUM_OF_MAF; i++) {
1104 pdata->pfilter_table[i][0] = 0;
1105 pdata->pfilter_table[i][1] = 0;
1108 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1110 if (dev->net->flags & IFF_PROMISC) {
1111 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1112 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1114 if (dev->net->flags & IFF_ALLMULTI) {
1115 netif_dbg(dev, drv, dev->net,
1116 "receive all multicast enabled");
1117 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1121 if (netdev_mc_count(dev->net)) {
1122 struct netdev_hw_addr *ha;
1125 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1127 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1130 netdev_for_each_mc_addr(ha, netdev) {
1131 /* set first 32 into Perfect Filter */
1133 lan78xx_set_addr_filter(pdata, i, ha->addr);
1135 u32 bitnum = lan78xx_hash(ha->addr);
1137 pdata->mchash_table[bitnum / 32] |=
1138 (1 << (bitnum % 32));
1139 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1145 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1147 /* defer register writes to a sleepable context */
1148 schedule_work(&pdata->set_multicast);
1151 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1152 u16 lcladv, u16 rmtadv)
1154 u32 flow = 0, fct_flow = 0;
1157 if (dev->fc_autoneg)
1158 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1160 cap = dev->fc_request_control;
1162 if (cap & FLOW_CTRL_TX)
1163 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1165 if (cap & FLOW_CTRL_RX)
1166 flow |= FLOW_CR_RX_FCEN_;
1168 if (dev->udev->speed == USB_SPEED_SUPER)
1170 else if (dev->udev->speed == USB_SPEED_HIGH)
1173 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1174 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1175 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1177 lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1179 /* threshold value should be set before enabling flow */
1180 lan78xx_write_reg(dev, FLOW, flow);
1185 static int lan78xx_link_reset(struct lan78xx_net *dev)
1187 struct phy_device *phydev = dev->net->phydev;
1188 struct ethtool_link_ksettings ecmd;
1189 int ladv, radv, ret, link;
1192 /* clear LAN78xx interrupt status */
1193 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1194 if (unlikely(ret < 0))
1197 mutex_lock(&phydev->lock);
1198 phy_read_status(phydev);
1199 link = phydev->link;
1200 mutex_unlock(&phydev->lock);
1202 if (!link && dev->link_on) {
1203 dev->link_on = false;
1206 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1207 if (unlikely(ret < 0))
1210 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1211 if (unlikely(ret < 0))
1214 del_timer(&dev->stat_monitor);
1215 } else if (link && !dev->link_on) {
1216 dev->link_on = true;
1218 phy_ethtool_ksettings_get(phydev, &ecmd);
1220 if (dev->udev->speed == USB_SPEED_SUPER) {
1221 if (ecmd.base.speed == 1000) {
1223 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1226 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1227 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1231 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1234 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1235 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1239 /* enable U1 & U2 */
1240 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1243 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1244 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1245 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1251 ladv = phy_read(phydev, MII_ADVERTISE);
1255 radv = phy_read(phydev, MII_LPA);
1259 netif_dbg(dev, link, dev->net,
1260 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1261 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1263 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1268 if (!timer_pending(&dev->stat_monitor)) {
1270 mod_timer(&dev->stat_monitor,
1271 jiffies + STAT_UPDATE_TIMER);
1274 tasklet_schedule(&dev->bh);
1280 /* some work can't be done in tasklets, so we use keventd
1282 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1283 * but tasklet_schedule() doesn't. hope the failure is rare.
1285 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1287 set_bit(work, &dev->flags);
1288 if (!schedule_delayed_work(&dev->wq, 0))
1289 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1292 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1296 if (urb->actual_length != 4) {
1297 netdev_warn(dev->net,
1298 "unexpected urb length %d", urb->actual_length);
1302 intdata = get_unaligned_le32(urb->transfer_buffer);
1304 if (intdata & INT_ENP_PHY_INT) {
1305 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1306 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1308 if (dev->domain_data.phyirq > 0) {
1309 local_irq_disable();
1310 generic_handle_irq(dev->domain_data.phyirq);
1314 netdev_warn(dev->net,
1315 "unexpected interrupt: 0x%08x\n", intdata);
1319 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1321 return MAX_EEPROM_SIZE;
1324 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1325 struct ethtool_eeprom *ee, u8 *data)
1327 struct lan78xx_net *dev = netdev_priv(netdev);
1330 ret = usb_autopm_get_interface(dev->intf);
1334 ee->magic = LAN78XX_EEPROM_MAGIC;
1336 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1338 usb_autopm_put_interface(dev->intf);
1343 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1344 struct ethtool_eeprom *ee, u8 *data)
1346 struct lan78xx_net *dev = netdev_priv(netdev);
1349 ret = usb_autopm_get_interface(dev->intf);
1353 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1354 * to load data from EEPROM
1356 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1357 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1358 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1359 (ee->offset == 0) &&
1361 (data[0] == OTP_INDICATOR_1))
1362 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1364 usb_autopm_put_interface(dev->intf);
1369 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1372 if (stringset == ETH_SS_STATS)
1373 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1376 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1378 if (sset == ETH_SS_STATS)
1379 return ARRAY_SIZE(lan78xx_gstrings);
1384 static void lan78xx_get_stats(struct net_device *netdev,
1385 struct ethtool_stats *stats, u64 *data)
1387 struct lan78xx_net *dev = netdev_priv(netdev);
1389 lan78xx_update_stats(dev);
1391 mutex_lock(&dev->stats.access_lock);
1392 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1393 mutex_unlock(&dev->stats.access_lock);
/* ethtool .get_wol: report Wake-on-LAN capability and the currently
 * configured wake options.  WoL is only advertised when the USB
 * configuration has remote wakeup enabled (USB_CFG_RMT_WKP_).
 */
1396 static void lan78xx_get_wol(struct net_device *netdev,
1397 struct ethtool_wolinfo *wol)
1399 struct lan78xx_net *dev = netdev_priv(netdev);
1402 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
/* Device must be resumed before register access; bail out silently
 * (wolinfo stays zeroed) if that fails.
 */
1404 if (usb_autopm_get_interface(dev->intf) < 0)
1407 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1408 if (unlikely(ret < 0)) {
1412 if (buf & USB_CFG_RMT_WKP_) {
1413 wol->supported = WAKE_ALL;
1414 wol->wolopts = pdata->wol;
1421 usb_autopm_put_interface(dev->intf);
1424 static int lan78xx_set_wol(struct net_device *netdev,
1425 struct ethtool_wolinfo *wol)
1427 struct lan78xx_net *dev = netdev_priv(netdev);
1428 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1431 ret = usb_autopm_get_interface(dev->intf);
1435 if (wol->wolopts & ~WAKE_ALL)
1438 pdata->wol = wol->wolopts;
1440 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1442 phy_ethtool_set_wol(netdev->phydev, wol);
1444 usb_autopm_put_interface(dev->intf);
/* ethtool .get_eee: combine PHY EEE advertisement state with the MAC's
 * EEE enable bit and LPI request delay register.
 */
1449 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1451 struct lan78xx_net *dev = netdev_priv(net);
1452 struct phy_device *phydev = net->phydev;
1456 ret = usb_autopm_get_interface(dev->intf);
1460 ret = phy_ethtool_get_eee(phydev, edata);
1464 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1465 if (buf & MAC_CR_EEE_EN_) {
1466 edata->eee_enabled = true;
/* EEE is active only when both link partners advertise it. */
1467 edata->eee_active = !!(edata->advertised &
1468 edata->lp_advertised);
1469 edata->tx_lpi_enabled = true;
1470 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1471 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1472 edata->tx_lpi_timer = buf;
1474 edata->eee_enabled = false;
1475 edata->eee_active = false;
1476 edata->tx_lpi_enabled = false;
1477 edata->tx_lpi_timer = 0;
1482 usb_autopm_put_interface(dev->intf);
/* ethtool .set_eee: enable/disable EEE in the MAC and program the LPI
 * request delay; when enabling, also push the request to the PHY.
 * NOTE(review): intermediate lan78xx_read_reg/write_reg return codes
 * are not checked here — behavior kept as-is.
 */
1487 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1489 struct lan78xx_net *dev = netdev_priv(net);
1493 ret = usb_autopm_get_interface(dev->intf);
1497 if (edata->eee_enabled) {
1498 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1499 buf |= MAC_CR_EEE_EN_;
1500 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1502 phy_ethtool_set_eee(net->phydev, edata);
1504 buf = (u32)edata->tx_lpi_timer;
1505 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1507 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1508 buf &= ~MAC_CR_EEE_EN_;
1509 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1512 usb_autopm_put_interface(dev->intf);
/* ethtool .get_link: force a fresh PHY status read (under the PHY lock)
 * instead of trusting the cached link state.
 */
1517 static u32 lan78xx_get_link(struct net_device *net)
1521 mutex_lock(&net->phydev->lock);
1522 phy_read_status(net->phydev);
1523 link = net->phydev->link;
1524 mutex_unlock(&net->phydev->lock);
1529 static void lan78xx_get_drvinfo(struct net_device *net,
1530 struct ethtool_drvinfo *info)
1532 struct lan78xx_net *dev = netdev_priv(net);
1534 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1535 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1538 static u32 lan78xx_get_msglevel(struct net_device *net)
1540 struct lan78xx_net *dev = netdev_priv(net);
1542 return dev->msg_enable;
1545 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1547 struct lan78xx_net *dev = netdev_priv(net);
1549 dev->msg_enable = level;
/* ethtool .get_link_ksettings: fetch speed/duplex/advertising from the
 * PHY, with the device resumed for the duration.
 */
1552 static int lan78xx_get_link_ksettings(struct net_device *net,
1553 struct ethtool_link_ksettings *cmd)
1555 struct lan78xx_net *dev = netdev_priv(net);
1556 struct phy_device *phydev = net->phydev;
1559 ret = usb_autopm_get_interface(dev->intf);
1563 phy_ethtool_ksettings_get(phydev, cmd);
1565 usb_autopm_put_interface(dev->intf);
/* ethtool .set_link_ksettings: apply new speed/duplex settings.  For
 * forced (non-autoneg) modes the link is bounced via a temporary
 * loopback write to BMCR so the partner renegotiates the new mode.
 */
1570 static int lan78xx_set_link_ksettings(struct net_device *net,
1571 const struct ethtool_link_ksettings *cmd)
1573 struct lan78xx_net *dev = netdev_priv(net);
1574 struct phy_device *phydev = net->phydev;
1578 ret = usb_autopm_get_interface(dev->intf);
1582 /* change speed & duplex */
1583 ret = phy_ethtool_ksettings_set(phydev, cmd);
1585 if (!cmd->base.autoneg) {
1586 /* force link down */
1587 temp = phy_read(phydev, MII_BMCR);
1588 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1590 phy_write(phydev, MII_BMCR, temp);
1593 usb_autopm_put_interface(dev->intf);
/* ethtool .get_pauseparam: report the driver's requested flow-control
 * configuration (software state, not negotiated result).
 */
1598 static void lan78xx_get_pause(struct net_device *net,
1599 struct ethtool_pauseparam *pause)
1601 struct lan78xx_net *dev = netdev_priv(net);
1602 struct phy_device *phydev = net->phydev;
1603 struct ethtool_link_ksettings ecmd;
1605 phy_ethtool_ksettings_get(phydev, &ecmd);
1607 pause->autoneg = dev->fc_autoneg;
1609 if (dev->fc_request_control & FLOW_CTRL_TX)
1610 pause->tx_pause = 1;
1612 if (dev->fc_request_control & FLOW_CTRL_RX)
1613 pause->rx_pause = 1;
/* ethtool .set_pauseparam: record the requested rx/tx pause and, when
 * autoneg is on, fold the matching Pause/Asym_Pause bits into the PHY
 * advertisement and renegotiate.  Pause autoneg is only valid when
 * link autoneg itself is enabled.
 */
1616 static int lan78xx_set_pause(struct net_device *net,
1617 struct ethtool_pauseparam *pause)
1619 struct lan78xx_net *dev = netdev_priv(net);
1620 struct phy_device *phydev = net->phydev;
1621 struct ethtool_link_ksettings ecmd;
1624 phy_ethtool_ksettings_get(phydev, &ecmd);
1626 if (pause->autoneg && !ecmd.base.autoneg) {
1631 dev->fc_request_control = 0;
1632 if (pause->rx_pause)
1633 dev->fc_request_control |= FLOW_CTRL_RX;
1635 if (pause->tx_pause)
1636 dev->fc_request_control |= FLOW_CTRL_TX;
1638 if (ecmd.base.autoneg) {
1639 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
/* Clear stale pause bits, then OR in the modes derived from the
 * requested flow-control via mii_advertise_flowctrl().
 */
1642 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1643 ecmd.link_modes.advertising);
1644 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1645 ecmd.link_modes.advertising);
1646 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1647 mii_adv_to_linkmode_adv_t(fc, mii_adv);
1648 linkmode_or(ecmd.link_modes.advertising, fc,
1649 ecmd.link_modes.advertising);
1651 phy_ethtool_ksettings_set(phydev, &ecmd);
1654 dev->fc_autoneg = pause->autoneg;
1661 static int lan78xx_get_regs_len(struct net_device *netdev)
1663 if (!netdev->phydev)
1664 return (sizeof(lan78xx_regs));
1666 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
/* ethtool .get_regs: dump all MAC registers, followed by the first 32
 * PHY registers when a PHY is attached.  Layout must match
 * lan78xx_get_regs_len().
 */
1670 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1675 struct lan78xx_net *dev = netdev_priv(netdev);
1677 /* Read Device/MAC registers */
1678 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1679 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1681 if (!netdev->phydev)
1684 /* Read PHY registers */
/* i keeps advancing so PHY values land directly after the MAC dump. */
1685 for (j = 0; j < 32; i++, j++)
1686 data[i] = phy_read(netdev->phydev, j);
/* ethtool operations table for lan78xx devices. */
1689 static const struct ethtool_ops lan78xx_ethtool_ops = {
1690 .get_link = lan78xx_get_link,
1691 .nway_reset = phy_ethtool_nway_reset,
1692 .get_drvinfo = lan78xx_get_drvinfo,
1693 .get_msglevel = lan78xx_get_msglevel,
1694 .set_msglevel = lan78xx_set_msglevel,
1695 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1696 .get_eeprom = lan78xx_ethtool_get_eeprom,
1697 .set_eeprom = lan78xx_ethtool_set_eeprom,
1698 .get_ethtool_stats = lan78xx_get_stats,
1699 .get_sset_count = lan78xx_get_sset_count,
1700 .get_strings = lan78xx_get_strings,
1701 .get_wol = lan78xx_get_wol,
1702 .set_wol = lan78xx_set_wol,
1703 .get_eee = lan78xx_get_eee,
1704 .set_eee = lan78xx_set_eee,
1705 .get_pauseparam = lan78xx_get_pause,
1706 .set_pauseparam = lan78xx_set_pause,
1707 .get_link_ksettings = lan78xx_get_link_ksettings,
1708 .set_link_ksettings = lan78xx_set_link_ksettings,
1709 .get_regs_len = lan78xx_get_regs_len,
1710 .get_regs = lan78xx_get_regs,
/* Establish the device MAC address.  Precedence: address already in the
 * chip's RX_ADDR registers, then platform/Device Tree, then EEPROM/OTP,
 * then a random locally-administered address.  The chosen address is
 * written back to the receiver and perfect-filter slot 0, and copied
 * into the net_device.
 */
1713 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1715 u32 addr_lo, addr_hi;
1718 lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1719 lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
/* Unpack the little-endian register pair into a byte array. */
1721 addr[0] = addr_lo & 0xFF;
1722 addr[1] = (addr_lo >> 8) & 0xFF;
1723 addr[2] = (addr_lo >> 16) & 0xFF;
1724 addr[3] = (addr_lo >> 24) & 0xFF;
1725 addr[4] = addr_hi & 0xFF;
1726 addr[5] = (addr_hi >> 8) & 0xFF;
1728 if (!is_valid_ether_addr(addr)) {
1729 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1730 /* valid address present in Device Tree */
1731 netif_dbg(dev, ifup, dev->net,
1732 "MAC address read from Device Tree");
1733 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1734 ETH_ALEN, addr) == 0) ||
1735 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1736 ETH_ALEN, addr) == 0)) &&
1737 is_valid_ether_addr(addr)) {
1738 /* eeprom values are valid so use them */
1739 netif_dbg(dev, ifup, dev->net,
1740 "MAC address read from EEPROM");
1742 /* generate random MAC */
1743 eth_random_addr(addr);
1744 netif_dbg(dev, ifup, dev->net,
1745 "MAC address set to random addr");
/* Repack and program the receiver address registers. */
1748 addr_lo = addr[0] | (addr[1] << 8) |
1749 (addr[2] << 16) | (addr[3] << 24);
1750 addr_hi = addr[4] | (addr[5] << 8);
1752 lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1753 lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* Mirror the address into perfect-filter slot 0 so unicast frames
 * addressed to us pass the receive filter.
 */
1756 lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1757 lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1759 ether_addr_copy(dev->net->dev_addr, addr);
1762 /* MDIO read and write wrappers for phylib */
/* Read one PHY register over the MII management interface.  Serialised
 * by phy_mutex; the MII_ACC register encodes PHY id, register index and
 * direction, and completion is detected by polling the busy bit.
 */
1763 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1765 struct lan78xx_net *dev = bus->priv;
1769 ret = usb_autopm_get_interface(dev->intf);
1773 mutex_lock(&dev->phy_mutex);
1775 /* confirm MII not busy */
1776 ret = lan78xx_phy_wait_not_busy(dev);
1780 /* set the address, index & direction (read from PHY) */
1781 addr = mii_access(phy_id, idx, MII_READ);
1782 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1784 ret = lan78xx_phy_wait_not_busy(dev);
1788 ret = lan78xx_read_reg(dev, MII_DATA, &val);
/* MII registers are 16 bits wide. */
1790 ret = (int)(val & 0xFFFF);
1793 mutex_unlock(&dev->phy_mutex);
1794 usb_autopm_put_interface(dev->intf);
/* Write one PHY register: data first, then the MII_ACC command that
 * triggers the transaction, then poll for completion.
 */
1799 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1802 struct lan78xx_net *dev = bus->priv;
1806 ret = usb_autopm_get_interface(dev->intf);
1810 mutex_lock(&dev->phy_mutex);
1812 /* confirm MII not busy */
1813 ret = lan78xx_phy_wait_not_busy(dev);
1818 ret = lan78xx_write_reg(dev, MII_DATA, val);
1820 /* set the address, index & direction (write to PHY) */
1821 addr = mii_access(phy_id, idx, MII_WRITE);
1822 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1824 ret = lan78xx_phy_wait_not_busy(dev);
1829 mutex_unlock(&dev->phy_mutex);
1830 usb_autopm_put_interface(dev->intf);
/* Allocate and register the driver's MDIO bus.  The scan mask depends
 * on the chip: LAN7800/7850 use the fixed internal PHY at address 1,
 * while LAN7801 scans the external PHY address range.  An optional
 * "mdio" Device Tree child node is honoured via of_mdiobus_register().
 */
1834 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1836 struct device_node *node;
1839 dev->mdiobus = mdiobus_alloc();
1840 if (!dev->mdiobus) {
1841 netdev_err(dev->net, "can't allocate MDIO bus\n");
1845 dev->mdiobus->priv = (void *)dev;
1846 dev->mdiobus->read = lan78xx_mdiobus_read;
1847 dev->mdiobus->write = lan78xx_mdiobus_write;
1848 dev->mdiobus->name = "lan78xx-mdiobus";
1849 dev->mdiobus->parent = &dev->udev->dev;
/* Bus id derived from USB topology so multiple adapters coexist. */
1851 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1852 dev->udev->bus->busnum, dev->udev->devnum)
1854 switch (dev->chipid) {
1855 case ID_REV_CHIP_ID_7800_:
1856 case ID_REV_CHIP_ID_7850_:
1857 /* set to internal PHY id */
1858 dev->mdiobus->phy_mask = ~(1 << 1);
1860 case ID_REV_CHIP_ID_7801_:
1861 /* scan thru PHYAD[2..0] */
1862 dev->mdiobus->phy_mask = ~(0xFF);
1866 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1867 ret = of_mdiobus_register(dev->mdiobus, node);
1870 netdev_err(dev->net, "can't register MDIO bus\n");
1874 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
/* error path: free the allocated (unregistered) bus */
1877 mdiobus_free(dev->mdiobus);
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
1881 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1883 mdiobus_unregister(dev->mdiobus);
1884 mdiobus_free(dev->mdiobus);
1887 static void lan78xx_link_status_change(struct net_device *net)
1889 struct phy_device *phydev = net->phydev;
1891 phy_print_status(phydev);
/* irq_domain .map: wire a virtual IRQ to this driver's irqchip and
 * simple-IRQ flow handler, stashing the per-device data as chip data.
 */
1894 static int irq_map(struct irq_domain *d, unsigned int irq,
1895 irq_hw_number_t hwirq)
1897 struct irq_domain_data *data = d->host_data;
1899 irq_set_chip_data(irq, data);
1900 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1901 irq_set_noprobe(irq);
/* irq_domain .unmap: detach handler and chip data from the virq. */
1906 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1908 irq_set_chip_and_handler(irq, NULL, NULL);
1909 irq_set_chip_data(irq, NULL);
/* Domain ops for the device's internal interrupt lines. */
1912 static const struct irq_domain_ops chip_domain_ops = {
/* irqchip .irq_mask: clear the enable bit in the software shadow; the
 * hardware register is written later in irq_bus_sync_unlock (USB
 * register access cannot happen in atomic context).
 */
1917 static void lan78xx_irq_mask(struct irq_data *irqd)
1919 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1921 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
/* irqchip .irq_unmask: set the enable bit in the software shadow. */
1924 static void lan78xx_irq_unmask(struct irq_data *irqd)
1926 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1928 data->irqenable |= BIT(irqd_to_hwirq(irqd));
/* irqchip .irq_bus_lock: serialise shadow updates for slow-bus sync. */
1931 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1933 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1935 mutex_lock(&data->irq_lock);
/* irqchip .irq_bus_sync_unlock: flush the shadow enable mask to the
 * INT_EP_CTL register (only if it changed), then release the lock.
 */
1938 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1940 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1941 struct lan78xx_net *dev =
1942 container_of(data, struct lan78xx_net, domain_data);
1945 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1946 * are only two callbacks executed in non-atomic contex.
1948 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1949 if (buf != data->irqenable)
1950 lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1952 mutex_unlock(&data->irq_lock);
/* irqchip for the device's internal interrupt sources. */
1955 static struct irq_chip lan78xx_irqchip = {
1956 .name = "lan78xx-irqs",
1957 .irq_mask = lan78xx_irq_mask,
1958 .irq_unmask = lan78xx_irq_unmask,
1959 .irq_bus_lock = lan78xx_irq_bus_lock,
1960 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
/* Create the IRQ domain for the device's internal interrupt sources and
 * pre-map the PHY interrupt line.  The shadow enable mask is seeded
 * from the current INT_EP_CTL value.
 */
1963 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1965 struct device_node *of_node;
1966 struct irq_domain *irqdomain;
1967 unsigned int irqmap = 0;
1971 of_node = dev->udev->dev.parent->of_node;
1973 mutex_init(&dev->domain_data.irq_lock);
1975 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1976 dev->domain_data.irqenable = buf;
1978 dev->domain_data.irqchip = &lan78xx_irqchip;
1979 dev->domain_data.irq_handler = handle_simple_irq;
1981 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1982 &chip_domain_ops, &dev->domain_data);
1984 /* create mapping for PHY interrupt */
1985 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
/* mapping failure path: dispose of the freshly created domain */
1987 irq_domain_remove(irqdomain);
1996 dev->domain_data.irqdomain = irqdomain;
1997 dev->domain_data.phyirq = irqmap;
/* Undo lan78xx_setup_irq_domain(): dispose of the PHY mapping and
 * remove the domain, then clear the bookkeeping.
 */
2002 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2004 if (dev->domain_data.phyirq > 0) {
2005 irq_dispose_mapping(dev->domain_data.phyirq);
2007 if (dev->domain_data.irqdomain)
2008 irq_domain_remove(dev->domain_data.irqdomain);
2010 dev->domain_data.phyirq = 0;
2011 dev->domain_data.irqdomain = NULL;
/* PHY fixup for external LAN8835 (LAN7801 boards): route the shared pin
 * to IRQ mode, enable RGMII TXC delay in the MAC and tune the TX DLL,
 * then record the resulting RGMII TX-delay interface mode.
 */
2014 static int lan8835_fixup(struct phy_device *phydev)
2017 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2019 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2020 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2023 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2025 /* RGMII MAC TXC Delay Enable */
2026 lan78xx_write_reg(dev, MAC_RGMII_ID,
2027 MAC_RGMII_ID_TXC_DELAY_EN_);
2029 /* RGMII TX DLL Tune Adjust */
2030 lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2032 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
/* PHY fixup for external KSZ9031RNX (LAN7801 boards): program the RGMII
 * pad-skew registers via MMD and record RGMII RX-delay interface mode.
 */
2037 static int ksz9031rnx_fixup(struct phy_device *phydev)
2039 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2041 /* Micrel9301RNX PHY configuration */
2042 /* RGMII Control Signal Pad Skew */
2043 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2044 /* RGMII RX Data Pad Skew */
2045 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2046 /* RGMII RX Clock Pad Skew */
2047 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2049 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* LAN7801-specific PHY discovery.  If no PHY answers on the MDIO bus,
 * register a fixed 1000/Full pseudo-PHY (RGMII, clocks enabled in
 * HW_CFG).  Otherwise register fixups for the supported external PHYs
 * (KSZ9031RNX, LAN8835) and mark the PHY external.  Returns the PHY to
 * attach, or NULL on failure.
 */
2054 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2058 struct fixed_phy_status fphy_status = {
2060 .speed = SPEED_1000,
2061 .duplex = DUPLEX_FULL,
2063 struct phy_device *phydev;
2065 phydev = phy_find_first(dev->mdiobus);
2067 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2068 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2069 if (IS_ERR(phydev)) {
2070 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2073 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2074 dev->interface = PHY_INTERFACE_MODE_RGMII;
2075 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2076 MAC_RGMII_ID_TXC_DELAY_EN_);
2077 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2078 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2079 buf |= HW_CFG_CLK125_EN_;
2080 buf |= HW_CFG_REFCLK25_EN_;
2081 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* real PHY found but without a driver bound */
2084 netdev_err(dev->net, "no PHY driver found\n");
2087 dev->interface = PHY_INTERFACE_MODE_RGMII;
2088 /* external PHY fixup for KSZ9031RNX */
2089 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2092 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2095 /* external PHY fixup for LAN8835 */
2096 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2099 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2102 /* add more external PHY fixup here if needed */
2104 phydev->is_internal = false;
/* Find and connect the PHY for this chip, configure interrupt vs
 * polling mode, trim unsupported link modes, seed the flow-control
 * advertisement, apply Device Tree LED configuration and kick off
 * autonegotiation.
 */
2109 static int lan78xx_phy_init(struct lan78xx_net *dev)
2111 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2114 struct phy_device *phydev;
2116 switch (dev->chipid) {
2117 case ID_REV_CHIP_ID_7801_:
2118 phydev = lan7801_phy_init(dev);
2120 netdev_err(dev->net, "lan7801: PHY Init Failed");
2125 case ID_REV_CHIP_ID_7800_:
2126 case ID_REV_CHIP_ID_7850_:
2127 phydev = phy_find_first(dev->mdiobus);
2129 netdev_err(dev->net, "no PHY found\n");
2132 phydev->is_internal = true;
2133 dev->interface = PHY_INTERFACE_MODE_GMII;
2137 netdev_err(dev->net, "Unknown CHIP ID found\n");
2141 /* if phyirq is not set, use polling mode in phylib */
2142 if (dev->domain_data.phyirq > 0)
2143 phydev->irq = dev->domain_data.phyirq;
2145 phydev->irq = PHY_POLL;
2146 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2148 /* set to AUTOMDIX */
2149 phydev->mdix = ETH_TP_MDI_AUTO;
2151 ret = phy_connect_direct(dev->net, phydev,
2152 lan78xx_link_status_change,
2155 netdev_err(dev->net, "can't attach PHY to %s\n",
/* connect failure on LAN7801: undo fixed-PHY / fixup registration */
2157 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2158 if (phy_is_pseudo_fixed_link(phydev)) {
2159 fixed_phy_unregister(phydev);
2161 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2163 phy_unregister_fixup_for_uid(PHY_LAN8835,
2170 /* MAC doesn't support 1000T Half */
2171 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2173 /* support both flow controls */
2174 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2175 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2176 phydev->advertising);
2177 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2178 phydev->advertising);
2179 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2180 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2181 linkmode_or(phydev->advertising, fc, phydev->advertising);
/* Optional Device Tree LED setup: enable one LED output per entry in
 * the "microchip,led-modes" property.
 */
2183 if (phydev->mdio.dev.of_node) {
2187 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2188 "microchip,led-modes",
2191 /* Ensure the appropriate LEDs are enabled */
2192 lan78xx_read_reg(dev, HW_CFG, &reg);
2193 reg &= ~(HW_CFG_LED0_EN_ |
/* (len > N) evaluates to 0/1, enabling LEDs 0..len-1 */
2197 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2198 (len > 1) * HW_CFG_LED1_EN_ |
2199 (len > 2) * HW_CFG_LED2_EN_ |
2200 (len > 3) * HW_CFG_LED3_EN_;
2201 lan78xx_write_reg(dev, HW_CFG, reg);
2205 genphy_config_aneg(phydev);
2207 dev->fc_autoneg = phydev->autoneg;
/* Program the receiver's maximum frame length.  The receiver is
 * temporarily disabled around the update if it was running, and
 * re-enabled afterwards.
 */
2212 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2217 lan78xx_read_reg(dev, MAC_RX, &buf);
2219 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2222 buf &= ~MAC_RX_RXEN_;
2223 lan78xx_write_reg(dev, MAC_RX, buf);
2226 /* add 4 to size for FCS */
2227 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2228 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2230 lan78xx_write_reg(dev, MAC_RX, buf);
/* restore the receiver if it was previously enabled */
2233 buf |= MAC_RX_RXEN_;
2234 lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every URB queued on q.  The queue lock is
 * dropped around usb_unlink_urb() (which may complete synchronously and
 * re-take the lock via the completion handler), so the walk restarts
 * from the head after each unlink.  Returns the number of URBs
 * unlinked.
 */
2240 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2242 struct sk_buff *skb;
2243 unsigned long flags;
2246 spin_lock_irqsave(&q->lock, flags);
2247 while (!skb_queue_empty(q)) {
2248 struct skb_data *entry;
/* find the first entry not yet being unlinked */
2252 skb_queue_walk(q, skb) {
2253 entry = (struct skb_data *)skb->cb;
2254 if (entry->state != unlink_start)
2259 entry->state = unlink_start;
2262 /* Get reference count of the URB to avoid it to be
2263 * freed during usb_unlink_urb, which may trigger
2264 * use-after-free problem inside usb_unlink_urb since
2265 * usb_unlink_urb is always racing with .complete
2266 * handler(include defer_bh).
2269 spin_unlock_irqrestore(&q->lock, flags);
2270 /* during some PM-driven resume scenarios,
2271 * these (async) unlinks complete immediately
2273 ret = usb_unlink_urb(urb);
2274 if (ret != -EINPROGRESS && ret != 0)
2275 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2279 spin_lock_irqsave(&q->lock, flags);
2281 spin_unlock_irqrestore(&q->lock, flags);
/* ndo_change_mtu: update the hardware max frame length and the driver's
 * URB sizing.  Rejects MTUs whose on-wire length is an exact multiple
 * of the USB packet size (would require an extra zero-length packet).
 */
2285 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2287 struct lan78xx_net *dev = netdev_priv(netdev);
2288 int ll_mtu = new_mtu + netdev->hard_header_len;
2289 int old_hard_mtu = dev->hard_mtu;
2290 int old_rx_urb_size = dev->rx_urb_size;
2293 /* no second zero-length packet read wanted after mtu-sized packets */
2294 if ((ll_mtu % dev->maxpacket) == 0)
2297 ret = usb_autopm_get_interface(dev->intf);
/* +VLAN_ETH_HLEN leaves room for Ethernet + VLAN headers */
2301 lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2303 netdev->mtu = new_mtu;
2305 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2306 if (dev->rx_urb_size == old_hard_mtu) {
2307 dev->rx_urb_size = dev->hard_mtu;
2308 if (dev->rx_urb_size > old_rx_urb_size) {
/* in-flight RX URBs are too small now; recycle them */
2309 if (netif_running(dev->net)) {
2310 unlink_urbs(dev, &dev->rxq);
2311 tasklet_schedule(&dev->bh);
2316 usb_autopm_put_interface(dev->intf);
/* ndo_set_mac_address: validate and program a new MAC address into the
 * receiver address registers and perfect-filter slot 0.  Only allowed
 * while the interface is down.
 */
2321 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2323 struct lan78xx_net *dev = netdev_priv(netdev);
2324 struct sockaddr *addr = p;
2325 u32 addr_lo, addr_hi;
2327 if (netif_running(netdev))
2330 if (!is_valid_ether_addr(addr->sa_data))
2331 return -EADDRNOTAVAIL;
2333 ether_addr_copy(netdev->dev_addr, addr->sa_data);
/* pack the six address bytes into the little-endian register pair */
2335 addr_lo = netdev->dev_addr[0] |
2336 netdev->dev_addr[1] << 8 |
2337 netdev->dev_addr[2] << 16 |
2338 netdev->dev_addr[3] << 24;
2339 addr_hi = netdev->dev_addr[4] |
2340 netdev->dev_addr[5] << 8;
2342 lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2343 lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2345 /* Added to support MAC address changes */
2346 lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2347 lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2352 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate netdev feature flags (RX checksum, VLAN
 * strip, VLAN filter) into RFE_CTL bits.  The shadow value is updated
 * under the spinlock, then flushed to hardware.
 */
2353 static int lan78xx_set_features(struct net_device *netdev,
2354 netdev_features_t features)
2356 struct lan78xx_net *dev = netdev_priv(netdev);
2357 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2358 unsigned long flags;
2360 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2362 if (features & NETIF_F_RXCSUM) {
2363 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2364 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2366 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2367 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2370 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2371 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2373 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2375 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2376 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2378 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2380 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
/* write outside the spinlock: USB register access may sleep */
2382 lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2387 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2389 struct lan78xx_priv *pdata =
2390 container_of(param, struct lan78xx_priv, set_vlan);
2391 struct lan78xx_net *dev = pdata->dev;
2393 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2394 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
/* ndo_vlan_rx_add_vid: set the bit for vid in the software VLAN filter
 * bitmap (32 bits per table entry) and schedule the deferred hardware
 * update.
 */
2397 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2398 __be16 proto, u16 vid)
2400 struct lan78xx_net *dev = netdev_priv(netdev);
2401 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2403 u16 vid_dword_index;
2405 vid_dword_index = (vid >> 5) & 0x7F;
2406 vid_bit_index = vid & 0x1F;
2408 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2410 /* defer register writes to a sleepable context */
2411 schedule_work(&pdata->set_vlan);
/* ndo_vlan_rx_kill_vid: clear the bit for vid and schedule the deferred
 * hardware update.
 */
2416 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2417 __be16 proto, u16 vid)
2419 struct lan78xx_net *dev = netdev_priv(netdev);
2420 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2422 u16 vid_dword_index;
2424 vid_dword_index = (vid >> 5) & 0x7F;
2425 vid_bit_index = vid & 0x1F;
2427 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2429 /* defer register writes to a sleepable context */
2430 schedule_work(&pdata->set_vlan);
/* Initialise USB Latency Tolerance Messaging.  If LTM is enabled in
 * USB_CFG1, attempt to load the six LTM register values from EEPROM,
 * then OTP (a length marker of 24 bytes indicates a valid table);
 * otherwise the zeroed defaults are programmed.
 */
2435 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2439 u32 regs[6] = { 0 };
2441 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2442 if (buf & USB_CFG1_LTM_ENABLE_) {
2444 /* Get values from EEPROM first */
2445 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2446 if (temp[0] == 24) {
2447 ret = lan78xx_read_raw_eeprom(dev,
2454 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2455 if (temp[0] == 24) {
2456 ret = lan78xx_read_raw_otp(dev,
/* program the (possibly still zeroed) LTM parameter set */
2466 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2467 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2468 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2469 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2470 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2471 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
/* Set the enable bit(s) of a hardware block's control register. */
2474 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2476 return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
/* Disable a hardware block and poll (up to HW_DISABLE_TIMEOUT) until
 * the register reports the corresponding disabled/stopped bit.
 * Returns 0 on success, -ETIME on timeout, or a register-access error.
 */
2479 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2482 unsigned long timeout;
2483 bool stopped = true;
2487 /* Stop the h/w block (if not already stopped) */
2489 ret = lan78xx_read_reg(dev, reg, &buf);
2493 if (buf & hw_enabled) {
2496 ret = lan78xx_write_reg(dev, reg, buf);
2501 timeout = jiffies + HW_DISABLE_TIMEOUT;
2503 ret = lan78xx_read_reg(dev, reg, &buf);
2507 if (buf & hw_disabled)
2510 msleep(HW_DISABLE_DELAY_MS);
2511 } while (!stopped && !time_after(jiffies, timeout));
2514 ret = stopped ? 0 : -ETIME;
/* Pulse a FIFO's flush/reset bit. */
2519 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2521 return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
/* Enable the transmit datapath: MAC transmitter first, then Tx FIFO. */
2524 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2528 netif_dbg(dev, drv, dev->net, "start tx path");
2530 /* Start the MAC transmitter */
2532 ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2536 /* Start the Tx FIFO */
2538 ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
/* Disable the transmit datapath in the reverse order: FIFO first so no
 * new frames reach the MAC while it drains.
 */
2545 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2549 netif_dbg(dev, drv, dev->net, "stop tx path");
2551 /* Stop the Tx FIFO */
2553 ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2557 /* Stop the MAC transmitter */
2559 ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2566 /* The caller must ensure the Tx path is stopped before calling
2567 * lan78xx_flush_tx_fifo().
2569 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2571 return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
/* Enable the receive datapath: Rx FIFO first, then MAC receiver. */
2574 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2578 netif_dbg(dev, drv, dev->net, "start rx path");
2580 /* Start the Rx FIFO */
2582 ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2586 /* Start the MAC receiver*/
2588 ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
/* Disable the receive datapath: MAC receiver first, then Rx FIFO. */
2595 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2599 netif_dbg(dev, drv, dev->net, "stop rx path");
2601 /* Stop the MAC receiver */
2603 ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2607 /* Stop the Rx FIFO */
2609 ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2616 /* The caller must ensure the Rx path is stopped before calling
2617 * lan78xx_flush_rx_fifo().
2619 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2621 return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
/* Full device initialisation after (re)attach: lite-reset the chip,
 * restore the MAC address, size the bulk-in bursts for the USB link
 * speed, configure FIFOs, flow control and the receive filter, reset
 * the PHY, and program the MAC for the detected chip variant.
 */
2624 static int lan78xx_reset(struct lan78xx_net *dev)
2626 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2627 unsigned long timeout;
2632 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2636 buf |= HW_CFG_LRST_;
2638 ret = lan78xx_write_reg(dev, HW_CFG, buf);
/* poll for LiteReset self-clear, bounded to ~1s */
2642 timeout = jiffies + HZ;
2645 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2649 if (time_after(jiffies, timeout)) {
2650 netdev_warn(dev->net,
2651 "timeout on completion of LiteReset");
2655 } while (buf & HW_CFG_LRST_);
2657 lan78xx_init_mac_address(dev);
2659 /* save DEVID for later usage */
2660 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2664 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2665 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2667 /* Respond to the IN token with a NAK */
2668 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2672 buf |= USB_CFG_BIR_;
2674 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2679 lan78xx_init_ltm(dev);
/* burst size and queue lengths scale with the negotiated USB speed */
2681 if (dev->udev->speed == USB_SPEED_SUPER) {
2682 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2683 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2686 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2687 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2688 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2689 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2690 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2692 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2693 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2698 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2702 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2706 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2712 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2716 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2720 buf |= USB_CFG_BCE_;
2722 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2726 /* set FIFO sizes */
2727 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2729 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2733 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2735 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2739 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2743 ret = lan78xx_write_reg(dev, FLOW, 0);
2747 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2751 /* Don't need rfe_ctl_lock during initialisation */
2752 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2756 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2758 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2762 /* Enable or disable checksum offload engines */
2763 ret = lan78xx_set_features(dev->net, dev->net->features);
2767 lan78xx_set_multicast(dev->net);
/* reset the PHY and wait for it (and the device) to become ready */
2770 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2774 buf |= PMT_CTL_PHY_RST_;
2776 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2780 timeout = jiffies + HZ;
2783 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2787 if (time_after(jiffies, timeout)) {
2788 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2792 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2794 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2798 /* LAN7801 only has RGMII mode */
2799 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2800 buf &= ~MAC_CR_GMII_EN_;
2802 if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
2803 dev->chipid == ID_REV_CHIP_ID_7850_) {
2804 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2805 if (!ret && sig != EEPROM_INDICATOR) {
2806 /* Implies there is no external eeprom. Set mac speed */
2807 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2808 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2811 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2815 ret = lan78xx_set_rx_max_frame_length(dev,
2816 dev->net->mtu + VLAN_ETH_HLEN);
/* Seed the statistics rollover thresholds: most hardware counters are
 * 20 bits wide (default max filled in by the loop), while the byte and
 * LPI counters listed explicitly are full 32-bit counters.
 */
2821 static void lan78xx_init_stats(struct lan78xx_net *dev)
2826 /* initialize for stats update
2827 * some counters are 20bits and some are 32bits
2829 p = (u32 *)&dev->stats.rollover_max;
2830 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2833 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2834 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2835 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2836 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2837 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2838 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2839 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2840 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2841 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2842 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
/* request a stats refresh from the kevent worker */
2844 set_bit(EVENT_STAT_UPDATE, &dev->flags);
/* lan78xx_open - ndo_open handler: wake the USB interface, start the
 * PHY, submit the interrupt URB, flush and start the RX/TX data paths,
 * then mark the device open and schedule an initial link reset.
 * NOTE(review): error-handling lines between the calls appear elided
 * in this extract.
 */
2847 static int lan78xx_open(struct net_device *net)
2849 struct lan78xx_net *dev = netdev_priv(net);
2852 netif_dbg(dev, ifup, dev->net, "open device");
/* Keep the interface resumed while we bring the device up. */
2854 ret = usb_autopm_get_interface(dev->intf);
2858 mutex_lock(&dev->dev_mutex);
2860 phy_start(net->phydev);
2862 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2864 /* for Link Check */
2865 if (dev->urb_intr) {
2866 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2868 netif_err(dev, ifup, dev->net,
2869 "intr submit %d\n", ret);
/* Start with clean FIFOs before enabling the data paths. */
2874 ret = lan78xx_flush_rx_fifo(dev);
2877 ret = lan78xx_flush_tx_fifo(dev);
2881 ret = lan78xx_start_tx_path(dev);
2884 ret = lan78xx_start_rx_path(dev);
2888 lan78xx_init_stats(dev);
2890 set_bit(EVENT_DEV_OPEN, &dev->flags);
2892 netif_start_queue(net);
/* Assume link down until the deferred link-reset work confirms it. */
2894 dev->link_on = false;
2896 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2898 mutex_unlock(&dev->dev_mutex);
2901 usb_autopm_put_interface(dev->intf);
/* lan78xx_terminate_urbs - unlink all in-flight RX/TX URBs and sleep
 * until their completions have drained, then free the URBs of any
 * skbs left on the done list.
 */
2906 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2908 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2909 DECLARE_WAITQUEUE(wait, current);
2912 /* ensure there are no more active urbs */
2913 add_wait_queue(&unlink_wakeup, &wait);
2914 set_current_state(TASK_UNINTERRUPTIBLE);
/* Completions signal this waitqueue via dev->wait. */
2915 dev->wait = &unlink_wakeup;
2916 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2918 /* maybe wait for deletions to finish. */
2919 while (!skb_queue_empty(&dev->rxq) ||
2920 !skb_queue_empty(&dev->txq)) {
2921 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2922 set_current_state(TASK_UNINTERRUPTIBLE);
2923 netif_dbg(dev, ifdown, dev->net,
2924 "waited for %d urb completions", temp);
2926 set_current_state(TASK_RUNNING);
2928 remove_wait_queue(&unlink_wakeup, &wait);
/* Release URBs parked on the done list by the unlink completions. */
2930 while (!skb_queue_empty(&dev->done)) {
2931 struct skb_data *entry;
2932 struct sk_buff *skb;
2934 skb = skb_dequeue(&dev->done);
2935 entry = (struct skb_data *)(skb->cb);
2936 usb_free_urb(entry->urb);
/* lan78xx_stop - ndo_stop handler: stop the stats timer, quiesce the
 * queue/tasklet, drain URBs, stop the data paths and PHY, and neuter
 * the deferred work before dropping the autopm reference.
 */
2941 static int lan78xx_stop(struct net_device *net)
2943 struct lan78xx_net *dev = netdev_priv(net);
2945 netif_dbg(dev, ifup, dev->net, "stop device");
2947 mutex_lock(&dev->dev_mutex);
2949 if (timer_pending(&dev->stat_monitor))
2950 del_timer_sync(&dev->stat_monitor);
2952 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2953 netif_stop_queue(net);
2954 tasklet_kill(&dev->bh);
2956 lan78xx_terminate_urbs(dev);
2958 netif_info(dev, ifdown, dev->net,
2959 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2960 net->stats.rx_packets, net->stats.tx_packets,
2961 net->stats.rx_errors, net->stats.tx_errors);
2963 /* ignore errors that occur stopping the Tx and Rx data paths */
2964 lan78xx_stop_tx_path(dev)
2965 lan78xx_stop_rx_path(dev);
2968 phy_stop(net->phydev);
2970 usb_kill_urb(dev->urb_intr);
2972 skb_queue_purge(&dev->rxq_pause);
2974 /* deferred work (task, timer, softirq) must also stop.
2975 * can't flush_scheduled_work() until we drop rtnl (later),
2976 * else workers could deadlock; so make workers a NOP.
/* Clearing every event bit makes a late-running worker do nothing. */
2978 clear_bit(EVENT_TX_HALT, &dev->flags);
2979 clear_bit(EVENT_RX_HALT, &dev->flags);
2980 clear_bit(EVENT_LINK_RESET, &dev->flags);
2981 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
2983 cancel_delayed_work_sync(&dev->wq);
/* Balances the usb_autopm_get_interface() taken in lan78xx_open(). */
2985 usb_autopm_put_interface(dev->intf);
2987 mutex_unlock(&dev->dev_mutex);
/* lan78xx_tx_prep - prepend the 8-byte TX command header (TX_CMD_A,
 * TX_CMD_B) carrying length, checksum-offload, LSO and VLAN-tag info.
 * Frees the skb and returns NULL on failure (headroom/linearize);
 * NOTE(review): the return statements appear elided in this extract.
 */
2992 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2993 struct sk_buff *skb, gfp_t flags)
2995 u32 tx_cmd_a, tx_cmd_b;
/* Make sure there is writable headroom for the 8-byte command words. */
2998 if (skb_cow_head(skb, TX_OVERHEAD)) {
2999 dev_kfree_skb_any(skb);
3003 if (skb_linearize(skb)) {
3004 dev_kfree_skb_any(skb);
3008 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3010 if (skb->ip_summed == CHECKSUM_PARTIAL)
3011 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3014 if (skb_is_gso(skb)) {
/* Hardware has a minimum MSS; clamp up to TX_CMD_B_MSS_MIN_. */
3015 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3017 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3019 tx_cmd_a |= TX_CMD_A_LSO_;
3022 if (skb_vlan_tag_present(skb)) {
3023 tx_cmd_a |= TX_CMD_A_IVTG_;
3024 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
/* Command words go on the wire little-endian, unaligned-safe. */
3027 ptr = skb_push(skb, 8);
3028 put_unaligned_le32(tx_cmd_a, ptr);
3029 put_unaligned_le32(tx_cmd_b, ptr + 4);
/* defer_bh - move an skb from an active queue (rxq/txq) to the done
 * list under the queue locks, record its new state, and kick the
 * bottom-half tasklet when the done list transitions from empty.
 * Returns the skb's previous state.
 */
3034 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3035 struct sk_buff_head *list, enum skb_state state)
3037 unsigned long flags;
3038 enum skb_state old_state;
3039 struct skb_data *entry = (struct skb_data *)skb->cb;
3041 spin_lock_irqsave(&list->lock, flags);
3042 old_state = entry->state;
3043 entry->state = state;
3045 __skb_unlink(skb, list);
/* IRQ state stays saved across the lock handoff to done.lock. */
3046 spin_unlock(&list->lock);
3047 spin_lock(&dev->done.lock);
3049 __skb_queue_tail(&dev->done, skb);
/* Only schedule on the empty->non-empty edge to avoid re-arming. */
3050 if (skb_queue_len(&dev->done) == 1)
3051 tasklet_schedule(&dev->bh);
3052 spin_unlock_irqrestore(&dev->done.lock, flags);
/* tx_complete - URB completion for bulk-out transfers: account
 * packets/bytes or errors, react to pipe stalls, then hand the skb
 * to the bottom half via defer_bh().
 * NOTE(review): several switch cases appear elided in this extract.
 */
3057 static void tx_complete(struct urb *urb)
3059 struct sk_buff *skb = (struct sk_buff *)urb->context;
3060 struct skb_data *entry = (struct skb_data *)skb->cb;
3061 struct lan78xx_net *dev = entry->dev;
3063 if (urb->status == 0) {
3064 dev->net->stats.tx_packets += entry->num_of_packet;
3065 dev->net->stats.tx_bytes += entry->length;
3067 dev->net->stats.tx_errors++;
3069 switch (urb->status) {
/* Stalled pipe: let the deferred worker clear the halt. */
3071 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3074 /* software-driven interface shutdown */
3082 netif_stop_queue(dev->net);
3085 netif_dbg(dev, tx_err, dev->net,
3086 "tx err %d\n", entry->urb->status);
/* Async put: we are in interrupt context here. */
3091 usb_autopm_put_interface_async(dev->intf);
3093 defer_bh(dev, skb, &dev->txq, tx_done);
/* lan78xx_queue_skb - append an skb to a queue and tag its cb state;
 * caller must already hold the queue's lock (uses __skb_queue_tail).
 */
3096 static void lan78xx_queue_skb(struct sk_buff_head *list,
3097 struct sk_buff *newsk, enum skb_state state)
3099 struct skb_data *entry = (struct skb_data *)newsk->cb;
3101 __skb_queue_tail(list, newsk);
3102 entry->state = state;
/* lan78xx_start_xmit - ndo_start_xmit: timestamp, prepend the TX
 * command header, queue on txq_pend and kick the bottom half.
 * Always returns NETDEV_TX_OK; failed prep is counted as a drop.
 */
3106 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3108 struct lan78xx_net *dev = netdev_priv(net);
3109 struct sk_buff *skb2 = NULL;
/* Device may be autosuspended; schedule the worker to resume it. */
3111 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3112 schedule_delayed_work(&dev->wq, 0);
3115 skb_tx_timestamp(skb);
3116 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
3120 skb_queue_tail(&dev->txq_pend, skb2);
3122 /* throttle TX patch at slower than SUPER SPEED USB */
3123 if ((dev->udev->speed < USB_SPEED_SUPER) &&
3124 (skb_queue_len(&dev->txq_pend) > 10))
3125 netif_stop_queue(net);
/* tx_prep freed the skb on failure; just account the drop. */
3127 netif_dbg(dev, tx_err, dev->net,
3128 "lan78xx_tx_prep return NULL\n");
3129 dev->net->stats.tx_errors++;
3130 dev->net->stats.tx_dropped++;
3133 tasklet_schedule(&dev->bh);
3135 return NETDEV_TX_OK;
/* lan78xx_bind - allocate and initialise driver private data, choose
 * default offload features, set up the IRQ domain, reset the chip and
 * bring up the MDIO bus. Error paths unwind in reverse order.
 * NOTE(review): error-check/goto lines appear elided in this extract.
 */
3138 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3140 struct lan78xx_priv *pdata = NULL;
3144 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3146 pdata = (struct lan78xx_priv *)(dev->data[0]);
3148 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3154 spin_lock_init(&pdata->rfe_ctl_lock);
3155 mutex_init(&pdata->dataport_mutex);
3157 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3159 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3160 pdata->vlan_table[i] = 0;
3162 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
/* Build the default feature set from the compile-time defaults. */
3164 dev->net->features = 0;
3166 if (DEFAULT_TX_CSUM_ENABLE)
3167 dev->net->features |= NETIF_F_HW_CSUM;
3169 if (DEFAULT_RX_CSUM_ENABLE)
3170 dev->net->features |= NETIF_F_RXCSUM;
3172 if (DEFAULT_TSO_CSUM_ENABLE)
3173 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3175 if (DEFAULT_VLAN_RX_OFFLOAD)
3176 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3178 if (DEFAULT_VLAN_FILTER_ENABLE)
3179 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3181 dev->net->hw_features = dev->net->features;
3183 ret = lan78xx_setup_irq_domain(dev);
3185 netdev_warn(dev->net,
3186 "lan78xx_setup_irq_domain() failed : %d", ret);
/* Account for the 8-byte TX command header in the MTU math. */
3190 dev->net->hard_header_len += TX_OVERHEAD;
3191 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3193 /* Init all registers */
3194 ret = lan78xx_reset(dev);
3196 netdev_warn(dev->net, "Registers INIT FAILED....");
3200 ret = lan78xx_mdio_init(dev);
3202 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3206 dev->net->flags |= IFF_MULTICAST;
3208 pdata->wol = WAKE_MAGIC;
/* --- error unwind --- */
3213 lan78xx_remove_irq_domain(dev);
3216 netdev_warn(dev->net, "Bind routine FAILED");
3217 cancel_work_sync(&pdata->set_multicast);
3218 cancel_work_sync(&pdata->set_vlan);
/* lan78xx_unbind - tear down what lan78xx_bind() created: IRQ domain,
 * MDIO bus, and the deferred multicast/VLAN work items.
 */
3223 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3225 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3227 lan78xx_remove_irq_domain(dev);
3229 lan78xx_remove_mdio(dev);
3232 cancel_work_sync(&pdata->set_multicast);
3233 cancel_work_sync(&pdata->set_vlan);
3234 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* lan78xx_rx_csum_offload - propagate the hardware RX checksum from
 * RX_CMD_B into the skb, or fall back to software checksumming when
 * offload is disabled, the hardware flagged a checksum error, or a
 * VLAN tag is present but not being stripped.
 */
3241 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3242 struct sk_buff *skb,
3243 u32 rx_cmd_a, u32 rx_cmd_b)
3245 /* HW Checksum offload appears to be flawed if used when not stripping
3246 * VLAN headers. Drop back to S/W checksums under these conditions.
3248 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3249 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3250 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3251 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3252 skb->ip_summed = CHECKSUM_NONE;
/* Hardware checksum lives in the top 16 bits of RX_CMD_B. */
3254 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3255 skb->ip_summed = CHECKSUM_COMPLETE;
/* lan78xx_rx_vlan_offload - if VLAN RX offload is enabled and the
 * hardware filtered a tag (RX_CMD_A_FVTG_), attach the 802.1Q tag
 * from the low 16 bits of RX_CMD_B to the skb.
 */
3259 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3260 struct sk_buff *skb,
3261 u32 rx_cmd_a, u32 rx_cmd_b)
3263 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3264 (rx_cmd_a & RX_CMD_A_FVTG_))
3265 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3266 (rx_cmd_b & 0xffff));
/* lan78xx_skb_return - deliver a received skb to the network stack,
 * or park it on rxq_pause while reception is paused. Updates RX
 * packet/byte counters and clears the cb scratch area first.
 */
3269 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3273 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3274 skb_queue_tail(&dev->rxq_pause, skb);
3278 dev->net->stats.rx_packets++;
3279 dev->net->stats.rx_bytes += skb->len;
3281 skb->protocol = eth_type_trans(skb, dev->net);
3283 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3284 skb->len + sizeof(struct ethhdr), skb->protocol);
/* cb still holds skb_data from the URB path; wipe before the stack. */
3285 memset(skb->cb, 0, sizeof(struct skb_data));
/* If RX timestamping claims the skb, delivery happens later. */
3287 if (skb_defer_rx_timestamp(skb))
3290 status = netif_rx(skb);
3291 if (status != NET_RX_SUCCESS)
3292 netif_dbg(dev, rx_err, dev->net,
3293 "netif_rx status %d\n", status);
/* lan78xx_rx - parse a bulk-in buffer that may hold several frames.
 * Each frame is preceded by RX_CMD_A/B/C words; frames are cloned out
 * (last frame reuses the original skb), offloads applied, FCS trimmed,
 * and inter-frame padding skipped.
 * NOTE(review): some lines (e.g. clone data-pointer setup) appear
 * elided in this extract.
 */
3296 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3298 if (skb->len < dev->net->hard_header_len)
3301 while (skb->len > 0) {
3302 u32 rx_cmd_a, rx_cmd_b, align_count, size;
3304 struct sk_buff *skb2;
3305 unsigned char *packet;
/* Pull the three little-endian command words off the front. */
3307 rx_cmd_a = get_unaligned_le32(skb->data);
3308 skb_pull(skb, sizeof(rx_cmd_a));
3310 rx_cmd_b = get_unaligned_le32(skb->data);
3311 skb_pull(skb, sizeof(rx_cmd_b));
3313 rx_cmd_c = get_unaligned_le16(skb->data);
3314 skb_pull(skb, sizeof(rx_cmd_c));
3318 /* get the packet length */
3319 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
/* Frames are padded so the next one starts 4-byte aligned. */
3320 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3322 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3323 netif_dbg(dev, rx_err, dev->net,
3324 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3326 /* last frame in this batch */
3327 if (skb->len == size) {
3328 lan78xx_rx_csum_offload(dev, skb,
3329 rx_cmd_a, rx_cmd_b);
3330 lan78xx_rx_vlan_offload(dev, skb,
3331 rx_cmd_a, rx_cmd_b);
3333 skb_trim(skb, skb->len - 4); /* remove fcs */
3334 skb->truesize = size + sizeof(struct sk_buff);
/* Not the last frame: clone and carve out this frame's bytes. */
3339 skb2 = skb_clone(skb, GFP_ATOMIC);
3340 if (unlikely(!skb2)) {
3341 netdev_warn(dev->net, "Error allocating skb");
3346 skb2->data = packet;
3347 skb_set_tail_pointer(skb2, size);
3349 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3350 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3352 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3353 skb2->truesize = size + sizeof(struct sk_buff);
3355 lan78xx_skb_return(dev, skb2);
3358 skb_pull(skb, size);
3360 /* padding bytes before the next frame starts */
3362 skb_pull(skb, align_count);
/* rx_process - run lan78xx_rx() on a completed buffer; on parse
 * failure count an RX error, otherwise hand the (non-empty) skb to
 * the stack. Fall-through path requeues the skb on the done list.
 */
3368 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3370 if (!lan78xx_rx(dev, skb)) {
3371 dev->net->stats.rx_errors++;
3376 lan78xx_skb_return(dev, skb);
3380 netif_dbg(dev, rx_err, dev->net, "drop\n");
3381 dev->net->stats.rx_errors++;
/* Put back on done so the bottom half frees the URB/skb. */
3383 skb_queue_tail(&dev->done, skb);
3386 static void rx_complete(struct urb *urb);
/* rx_submit - allocate an rx skb, attach it to the URB and submit it
 * on the bulk-in pipe, but only while the device is present, running,
 * not halted and not asleep. On -EPIPE defers a halt-clear; on
 * -ENODEV detaches the device. Frees the skb on any failure path.
 */
3388 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3390 struct sk_buff *skb;
3391 struct skb_data *entry;
3392 unsigned long lockflags;
3393 size_t size = dev->rx_urb_size;
3396 skb = netdev_alloc_skb_ip_align(dev->net, size);
3402 entry = (struct skb_data *)skb->cb;
3407 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3408 skb->data, size, rx_complete, skb);
3410 spin_lock_irqsave(&dev->rxq.lock, lockflags);
/* Submit only while the device can actually receive. */
3412 if (netif_device_present(dev->net) &&
3413 netif_running(dev->net) &&
3414 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3415 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3416 ret = usb_submit_urb(urb, GFP_ATOMIC);
3419 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3422 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3425 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3426 netif_device_detach(dev->net);
/* Other submit errors: log and retry later via the tasklet. */
3432 netif_dbg(dev, rx_err, dev->net,
3433 "rx submit, %d\n", ret);
3434 tasklet_schedule(&dev->bh);
3437 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3440 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3442 dev_kfree_skb_any(skb);
/* rx_complete - URB completion for bulk-in transfers: classify the
 * status (success, stall, unlink/shutdown, overrun, other error),
 * account stats, hand the skb to the bottom half, and resubmit the
 * URB when the device is still running and not halted.
 * NOTE(review): several case labels appear elided in this extract.
 */
3448 static void rx_complete(struct urb *urb)
3450 struct sk_buff *skb = (struct sk_buff *)urb->context;
3451 struct skb_data *entry = (struct skb_data *)skb->cb;
3452 struct lan78xx_net *dev = entry->dev;
3453 int urb_status = urb->status;
3454 enum skb_state state;
3456 skb_put(skb, urb->actual_length);
3460 switch (urb_status) {
/* Success, but the frame must be at least a command header long. */
3462 if (skb->len < dev->net->hard_header_len) {
3464 dev->net->stats.rx_errors++;
3465 dev->net->stats.rx_length_errors++;
3466 netif_dbg(dev, rx_err, dev->net,
3467 "rx length %d\n", skb->len);
3469 usb_mark_last_busy(dev->udev);
/* Stall: ask the deferred worker to clear the pipe halt. */
3472 dev->net->stats.rx_errors++;
3473 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3475 case -ECONNRESET: /* async unlink */
3476 case -ESHUTDOWN: /* hardware gone */
3477 netif_dbg(dev, ifdown, dev->net,
3478 "rx shutdown, code %d\n", urb_status);
3486 dev->net->stats.rx_errors++;
3492 /* data overrun ... flush fifo? */
3494 dev->net->stats.rx_over_errors++;
3499 dev->net->stats.rx_errors++;
3500 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3504 state = defer_bh(dev, skb, &dev->rxq, state);
3507 if (netif_running(dev->net) &&
3508 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3509 state != unlink_start) {
3510 rx_submit(dev, urb, GFP_ATOMIC);
3515 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* lan78xx_tx_bh - TX bottom half: coalesce pending skbs from txq_pend
 * into one bulk-out transfer (a GSO skb always travels alone), build
 * and submit the URB, and handle autopm/asleep/stall outcomes.
 * NOTE(review): loop-control and error-branch lines appear elided in
 * this extract; the statement ordering below is load-bearing.
 */
3518 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3521 struct urb *urb = NULL;
3522 struct skb_data *entry;
3523 unsigned long flags;
3524 struct sk_buff_head *tqp = &dev->txq_pend;
3525 struct sk_buff *skb, *skb2;
3528 int skb_totallen, pkt_cnt;
/* Pass 1: under the queue lock, decide how many skbs to batch. */
3534 spin_lock_irqsave(&tqp->lock, flags);
3535 skb_queue_walk(tqp, skb) {
3536 if (skb_is_gso(skb)) {
3537 if (!skb_queue_is_first(tqp, skb)) {
3538 /* handle previous packets first */
/* GSO skb is sent on its own, bypassing the copy loop. */
3542 length = skb->len - TX_OVERHEAD;
3543 __skb_unlink(skb, tqp);
3544 spin_unlock_irqrestore(&tqp->lock, flags);
3548 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
/* Each packet is placed at a 4-byte-aligned offset. */
3550 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3553 spin_unlock_irqrestore(&tqp->lock, flags);
3555 /* copy to a single skb */
3556 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3560 skb_put(skb, skb_totallen);
/* Pass 2: copy the batched packets into the aggregate buffer. */
3562 for (count = pos = 0; count < pkt_cnt; count++) {
3563 skb2 = skb_dequeue(tqp);
3565 length += (skb2->len - TX_OVERHEAD);
3566 memcpy(skb->data + pos, skb2->data, skb2->len);
3567 pos += roundup(skb2->len, sizeof(u32));
3568 dev_kfree_skb(skb2);
3573 urb = usb_alloc_urb(0, GFP_ATOMIC);
3577 entry = (struct skb_data *)skb->cb;
3580 entry->length = length;
3581 entry->num_of_packet = count;
3583 spin_lock_irqsave(&dev->txq.lock, flags);
3584 ret = usb_autopm_get_interface_async(dev->intf);
3586 spin_unlock_irqrestore(&dev->txq.lock, flags);
3590 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3591 skb->data, skb->len, tx_complete, skb);
/* Exact multiple of wMaxPacketSize needs an explicit ZLP. */
3593 if (length % dev->maxpacket == 0) {
3594 /* send USB_ZERO_PACKET */
3595 urb->transfer_flags |= URB_ZERO_PACKET;
3599 /* if this triggers the device is still a sleep */
3600 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3601 /* transmission will be done in resume */
3602 usb_anchor_urb(urb, &dev->deferred);
3603 /* no use to process more packets */
3604 netif_stop_queue(dev->net);
3606 spin_unlock_irqrestore(&dev->txq.lock, flags);
3607 netdev_dbg(dev->net, "Delaying transmission for resumption\n")
3612 ret = usb_submit_urb(urb, GFP_ATOMIC);
3615 netif_trans_update(dev->net);
3616 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3617 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3618 netif_stop_queue(dev->net);
/* Submit failures: stall -> halt-clear worker; others -> drop. */
3621 netif_stop_queue(dev->net);
3622 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3623 usb_autopm_put_interface_async(dev->intf);
3626 usb_autopm_put_interface_async(dev->intf);
3627 netif_dbg(dev, tx_err, dev->net,
3628 "tx: submit urb err %d\n", ret);
3632 spin_unlock_irqrestore(&dev->txq.lock, flags);
3635 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3637 dev->net->stats.tx_dropped++;
3639 dev_kfree_skb_any(skb);
3642 netif_dbg(dev, tx_queued, dev->net,
3643 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* lan78xx_rx_bh - RX bottom half: top up the rxq with fresh URBs (up
 * to 10 per pass), reschedule if still under rx_qlen, and wake the
 * TX queue when txq has room again.
 */
3647 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3652 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
/* Bounded batch so the tasklet does not monopolise the CPU. */
3653 for (i = 0; i < 10; i++) {
3654 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3656 urb = usb_alloc_urb(0, GFP_ATOMIC);
3658 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3662 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3663 tasklet_schedule(&dev->bh);
3665 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3666 netif_wake_queue(dev->net);
/* lan78xx_bh - main bottom-half tasklet: drain the done list (process
 * completed RX, free cleaned-up RX/TX URBs), then refill RX, kick TX
 * and keep the stat-monitor timer honest.
 */
3669 static void lan78xx_bh(unsigned long param)
3671 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3672 struct sk_buff *skb;
3673 struct skb_data *entry;
3675 while ((skb = skb_dequeue(&dev->done))) {
3676 entry = (struct skb_data *)(skb->cb);
3677 switch (entry->state) {
3679 entry->state = rx_cleanup;
3680 rx_process(dev, skb);
3683 usb_free_urb(entry->urb);
3687 usb_free_urb(entry->urb);
3691 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3696 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3697 /* reset update timer delta */
3698 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3700 mod_timer(&dev->stat_monitor,
3701 jiffies + STAT_UPDATE_TIMER);
/* Pending TX work re-enters the TX path (details elided here). */
3704 if (!skb_queue_empty(&dev->txq_pend))
3707 if (!timer_pending(&dev->delay) &&
3708 !test_bit(EVENT_RX_HALT, &dev->flags))
/* lan78xx_delayedwork - deferred worker: services the event bits set
 * elsewhere (TX/RX pipe halt clearing, link reset, statistics update)
 * in process context where USB control transfers are allowed.
 */
3713 static void lan78xx_delayedwork(struct work_struct *work)
3716 struct lan78xx_net *dev;
3718 dev = container_of(work, struct lan78xx_net, wq.work);
/* Nothing to do if the interface cannot be resumed. */
3720 if (usb_autopm_get_interface(dev->intf) < 0)
3723 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3724 unlink_urbs(dev, &dev->txq);
3726 status = usb_clear_halt(dev->udev, dev->pipe_out);
3729 status != -ESHUTDOWN) {
3730 if (netif_msg_tx_err(dev))
3731 netdev_err(dev->net,
3732 "can't clear tx halt, status %d\n",
3735 clear_bit(EVENT_TX_HALT, &dev->flags);
3736 if (status != -ESHUTDOWN)
3737 netif_wake_queue(dev->net);
3741 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3742 unlink_urbs(dev, &dev->rxq);
3743 status = usb_clear_halt(dev->udev, dev->pipe_in);
3746 status != -ESHUTDOWN) {
3747 if (netif_msg_rx_err(dev))
3748 netdev_err(dev->net,
3749 "can't clear rx halt, status %d\n",
3752 clear_bit(EVENT_RX_HALT, &dev->flags);
3753 tasklet_schedule(&dev->bh);
3757 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3760 clear_bit(EVENT_LINK_RESET, &dev->flags);
3761 if (lan78xx_link_reset(dev) < 0) {
3762 netdev_info(dev->net, "link reset failed (%d)\n",
3767 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3768 lan78xx_update_stats(dev);
3770 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
/* Exponential back-off of the stats poll, capped at 50 periods. */
3772 mod_timer(&dev->stat_monitor,
3773 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3775 dev->delta = min((dev->delta * 2), 50);
3778 usb_autopm_put_interface(dev->intf);
/* intr_complete - completion for the interrupt-in URB: on success
 * process the status words, on shutdown-type errors stop, otherwise
 * log and fall through to resubmitting the URB while running.
 */
3781 static void intr_complete(struct urb *urb)
3783 struct lan78xx_net *dev = urb->context;
3784 int status = urb->status;
3789 lan78xx_status(dev, urb);
3792 /* software-driven interface shutdown */
3793 case -ENOENT: /* urb killed */
3794 case -ESHUTDOWN: /* hardware gone */
3795 netif_dbg(dev, ifdown, dev->net,
3796 "intr shutdown, code %d\n", status);
3799 /* NOTE: not throttling like RX/TX, since this endpoint
3800 * already polls infrequently
3803 netdev_dbg(dev->net, "intr status %d\n", status);
3807 if (!netif_running(dev->net))
/* Clear stale status data before re-arming the URB. */
3810 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3811 status = usb_submit_urb(urb, GFP_ATOMIC);
3813 netif_err(dev, timer, dev->net,
3814 "intr resubmit --> %d\n", status);
/* lan78xx_disconnect - USB disconnect handler: detach PHY fixups,
 * disconnect the PHY, unregister the netdev, cancel deferred work,
 * scuttle anchored URBs and unbind before releasing the intr URB.
 */
3817 static void lan78xx_disconnect(struct usb_interface *intf)
3819 struct lan78xx_net *dev;
3820 struct usb_device *udev;
3821 struct net_device *net;
3822 struct phy_device *phydev;
3824 dev = usb_get_intfdata(intf);
3825 usb_set_intfdata(intf, NULL);
3829 udev = interface_to_usbdev(intf);
3831 phydev = net->phydev;
/* Remove the KSZ9031/LAN8835 fixups registered at probe time. */
3833 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3834 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3836 phy_disconnect(net->phydev);
3838 if (phy_is_pseudo_fixed_link(phydev))
3839 fixed_phy_unregister(phydev);
3841 unregister_netdev(net);
3843 cancel_delayed_work_sync(&dev->wq);
3845 usb_scuttle_anchored_urbs(&dev->deferred);
3847 lan78xx_unbind(dev, intf);
3849 usb_kill_urb(dev->urb_intr);
3850 usb_free_urb(dev->urb_intr);
/* lan78xx_tx_timeout - ndo_tx_timeout: unlink stuck TX URBs and let
 * the bottom half restart transmission.
 */
3856 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
3858 struct lan78xx_net *dev = netdev_priv(net);
3860 unlink_urbs(dev, &dev->txq);
3861 tasklet_schedule(&dev->bh);
/* lan78xx_features_check - ndo_features_check: disable GSO for frames
 * that would exceed the device's single-transfer limit once the TX
 * command header is added, then apply the generic VLAN/VXLAN checks.
 */
3864 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3865 struct net_device *netdev,
3866 netdev_features_t features)
3868 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3869 features &= ~NETIF_F_GSO_MASK;
3871 features = vlan_features_check(skb, features);
3872 features = vxlan_features_check(skb, features);
/* Netdev operations table wiring the lan78xx_* handlers into the
 * network core.
 */
3877 static const struct net_device_ops lan78xx_netdev_ops = {
3878 .ndo_open = lan78xx_open,
3879 .ndo_stop = lan78xx_stop,
3880 .ndo_start_xmit = lan78xx_start_xmit,
3881 .ndo_tx_timeout = lan78xx_tx_timeout,
3882 .ndo_change_mtu = lan78xx_change_mtu,
3883 .ndo_set_mac_address = lan78xx_set_mac_addr,
3884 .ndo_validate_addr = eth_validate_addr,
3885 .ndo_do_ioctl = phy_do_ioctl_running,
3886 .ndo_set_rx_mode = lan78xx_set_multicast,
3887 .ndo_set_features = lan78xx_set_features,
3888 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3889 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3890 .ndo_features_check = lan78xx_features_check,
/* lan78xx_stat_monitor - periodic timer callback: defer the actual
 * statistics read to the worker (USB I/O is not allowed here).
 */
3893 static void lan78xx_stat_monitor(struct timer_list *t)
3895 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3897 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* lan78xx_probe - USB probe: allocate the netdev, initialise queues,
 * locks and work items, validate the three expected endpoints
 * (bulk-in, bulk-out, interrupt-in), bind the hardware, set up the
 * interrupt URB, initialise the PHY and register the net device.
 * NOTE(review): error-check/goto lines appear elided in this extract.
 */
3900 static int lan78xx_probe(struct usb_interface *intf,
3901 const struct usb_device_id *id)
3903 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3904 struct lan78xx_net *dev;
3905 struct net_device *netdev;
3906 struct usb_device *udev;
3909 unsigned int period;
3912 udev = interface_to_usbdev(intf);
3913 udev = usb_get_dev(udev);
3915 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3917 dev_err(&intf->dev, "Error: OOM\n");
3922 /* netdev_printk() needs this */
3923 SET_NETDEV_DEV(netdev, &intf->dev);
3925 dev = netdev_priv(netdev);
3929 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3930 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
/* Per-device queues, locks and deferred work. */
3932 skb_queue_head_init(&dev->rxq);
3933 skb_queue_head_init(&dev->txq);
3934 skb_queue_head_init(&dev->done);
3935 skb_queue_head_init(&dev->rxq_pause);
3936 skb_queue_head_init(&dev->txq_pend);
3937 mutex_init(&dev->phy_mutex);
3938 mutex_init(&dev->dev_mutex);
3940 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3941 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3942 init_usb_anchor(&dev->deferred);
3944 netdev->netdev_ops = &lan78xx_netdev_ops;
3945 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3946 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3949 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3951 mutex_init(&dev->stats.access_lock);
/* Validate the expected endpoint layout before touching hardware. */
3953 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3958 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3959 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3960 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3965 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3966 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3967 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3972 ep_intr = &intf->cur_altsetting->endpoint[2];
3973 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3978 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3979 usb_endpoint_num(&ep_intr->desc));
3981 ret = lan78xx_bind(dev, intf);
3985 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3986 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3988 /* MTU range: 68 - 9000 */
3989 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3990 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
/* Interrupt URB: buffer sized to the endpoint's max packet. */
3992 period = ep_intr->desc.bInterval;
3993 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3994 buf = kmalloc(maxp, GFP_KERNEL);
3996 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3997 if (!dev->urb_intr) {
4002 usb_fill_int_urb(dev->urb_intr, dev->udev,
4003 dev->pipe_intr, buf, maxp,
4004 intr_complete, dev, period);
/* URB core frees 'buf' with the URB. */
4005 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4009 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
4011 /* Reject broken descriptors. */
4012 if (dev->maxpacket == 0) {
4017 /* driver requires remote-wakeup capability during autosuspend. */
4018 intf->needs_remote_wakeup = 1;
4020 ret = lan78xx_phy_init(dev);
4024 ret = register_netdev(netdev);
4026 netif_err(dev, probe, netdev, "couldn't register the device\n");
4030 usb_set_intfdata(intf, dev);
4032 ret = device_set_wakeup_enable(&udev->dev, true);
4034 /* Default delay of 2sec has more overhead than advantage.
4035 * Set to 10sec as default.
4037 pm_runtime_set_autosuspend_delay(&udev->dev,
4038 DEFAULT_AUTOSUSPEND_DELAY);
/* --- error unwind --- */
4043 phy_disconnect(netdev->phydev);
4045 usb_free_urb(dev->urb_intr);
4047 lan78xx_unbind(dev, intf);
4049 free_netdev(netdev);
/* lan78xx_wakeframe_crc16 - bitwise CRC-16 (polynomial 0x8005) over
 * 'len' bytes of 'buf', used to program wake-frame filters.
 * NOTE(review): most of the shift/XOR loop body appears elided in
 * this extract.
 */
4056 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4058 const u16 crc16poly = 0x8005;
4064 for (i = 0; i < len; i++) {
4066 for (bit = 0; bit < 8; bit++) {
4070 if (msb ^ (u16)(data & 1)) {
4072 crc |= (u16)0x0001U;
/* lan78xx_set_auto_suspend - configure the chip for USB selective
 * (auto) suspend: stop the data paths, clear wake sources, enable
 * good-frame wakeup, select suspend mode 3 with PHY wake, clear the
 * wake-up status bits and re-enable RX so wake frames are seen.
 * NOTE(review): error-return lines appear elided in this extract.
 */
4081 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4086 ret = lan78xx_stop_tx_path(dev);
4090 ret = lan78xx_stop_rx_path(dev);
4094 /* auto suspend (selective suspend) */
/* Start from a clean wake-source configuration. */
4096 ret = lan78xx_write_reg(dev, WUCSR, 0);
4099 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4102 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4106 /* set goodframe wakeup */
4108 ret = lan78xx_read_reg(dev, WUCSR, &buf);
4112 buf |= WUCSR_RFE_WAKE_EN_;
4113 buf |= WUCSR_STORE_WAKE_;
4115 ret = lan78xx_write_reg(dev, WUCSR, buf);
4119 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4123 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4124 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4125 buf |= PMT_CTL_PHY_WAKE_EN_;
4126 buf |= PMT_CTL_WOL_EN_;
4127 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4128 buf |= PMT_CTL_SUS_MODE_3_;
4130 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Clear any latched wake-up status before suspending. */
4134 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4138 buf |= PMT_CTL_WUPS_MASK_;
4140 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* RX must run so the device can observe wake frames. */
4144 ret = lan78xx_start_rx_path(dev);
/* lan78xx_set_suspend - program the device's wake-on-LAN machinery
 * for system suspend based on the 'wol' bitmask (WAKE_PHY, WAKE_MAGIC,
 * WAKE_BCAST, WAKE_MCAST, WAKE_UCAST, WAKE_ARP). Multicast and ARP
 * wake use CRC16-matched wake-frame filters.
 * NOTE(review): error-return lines appear elided in this extract.
 */
4149 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4151 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4152 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4153 const u8 arp_type[2] = { 0x08, 0x06 };
4161 ret = lan78xx_stop_tx_path(dev);
4164 ret = lan78xx_stop_rx_path(dev);
/* Reset all wake sources and status before programming new ones. */
4168 ret = lan78xx_write_reg(dev, WUCSR, 0);
4171 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4174 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4182 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4186 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4187 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* Clear every wake-frame filter slot first. */
4189 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4190 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4196 if (wol & WAKE_PHY) {
4197 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4199 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4200 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4201 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4203 if (wol & WAKE_MAGIC) {
4204 temp_wucsr |= WUCSR_MPEN_;
4206 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4207 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4208 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4210 if (wol & WAKE_BCAST) {
4211 temp_wucsr |= WUCSR_BCST_EN_;
4213 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4214 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4215 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4217 if (wol & WAKE_MCAST) {
4218 temp_wucsr |= WUCSR_WAKE_EN_;
4220 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
/* Match the 3-byte IPv4 multicast MAC prefix 01:00:5E. */
4221 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4222 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4224 WUF_CFGX_TYPE_MCAST_ |
4225 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4226 (crc & WUF_CFGX_CRC16_MASK_));
4230 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4233 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4236 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4239 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4245 /* for IPv6 Multicast */
/* Match the 2-byte IPv6 multicast MAC prefix 33:33. */
4246 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4247 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4249 WUF_CFGX_TYPE_MCAST_ |
4250 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4251 (crc & WUF_CFGX_CRC16_MASK_));
4255 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4258 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4261 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4264 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4270 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4271 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4272 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4274 if (wol & WAKE_UCAST) {
4275 temp_wucsr |= WUCSR_PFDA_EN_;
4277 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4278 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4279 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4281 if (wol & WAKE_ARP) {
4282 temp_wucsr |= WUCSR_WAKE_EN_;
4284 /* set WUF_CFG & WUF_MASK
4285 * for packettype (offset 12,13) = ARP (0x0806)
4287 crc = lan78xx_wakeframe_crc16(arp_type, 2);
4288 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4290 WUF_CFGX_TYPE_ALL_ |
4291 (0 << WUF_CFGX_OFFSET_SHIFT_) |
4292 (crc & WUF_CFGX_CRC16_MASK_));
/* Mask 0x3000 selects bytes 12-13 (the EtherType field). */
4296 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4299 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4302 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4305 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4311 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4312 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4313 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4316 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4320 /* when multiple WOL bits are set */
4321 if (hweight_long((unsigned long)wol) > 1) {
4322 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4323 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4324 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4326 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* Clear latched wake-up status bits. */
4331 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4335 buf |= PMT_CTL_WUPS_MASK_;
4337 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* RX must run so wake events can be detected. */
4341 ret = lan78xx_start_rx_path(dev);
/* lan78xx_suspend - USB suspend handler: refuse autosuspend while TX
 * is busy, quiesce the device, then either arm selective suspend
 * (autosuspend), arm WoL (system suspend with the interface up), or
 * fully power down wake sources (interface down).
 * NOTE(review): error-check/goto lines appear elided in this extract.
 */
4346 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4348 struct lan78xx_net *dev = usb_get_intfdata(intf);
4352 mutex_lock(&dev->dev_mutex);
4354 netif_dbg(dev, ifdown, dev->net,
4355 "suspending: pm event %#x", message.event);
4357 dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4360 spin_lock_irq(&dev->txq.lock);
4361 /* don't autosuspend while transmitting */
4362 if ((skb_queue_len(&dev->txq) ||
4363 skb_queue_len(&dev->txq_pend)) &&
4364 PMSG_IS_AUTO(message)) {
4365 spin_unlock_irq(&dev->txq.lock);
4369 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4370 spin_unlock_irq(&dev->txq.lock);
4374 ret = lan78xx_stop_rx_path(dev);
4378 ret = lan78xx_flush_rx_fifo(dev);
4383 ret = lan78xx_stop_tx_path(dev);
4387 /* empty out the Rx and Tx queues */
4388 netif_device_detach(dev->net);
4389 lan78xx_terminate_urbs(dev);
4390 usb_kill_urb(dev->urb_intr);
4393 netif_device_attach(dev->net);
4395 del_timer(&dev->stat_monitor);
4397 if (PMSG_IS_AUTO(message)) {
4398 ret = lan78xx_set_auto_suspend(dev);
4402 struct lan78xx_priv *pdata;
/* System suspend with the interface open: arm configured WoL. */
4404 pdata = (struct lan78xx_priv *)(dev->data[0]);
4405 netif_carrier_off(dev->net);
4406 ret = lan78xx_set_suspend(dev, pdata->wol);
4411 /* Interface is down; don't allow WOL and PHY
4412 * events to wake up the host
4416 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4418 ret = lan78xx_write_reg(dev, WUCSR, 0);
4421 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4425 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4429 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4430 buf |= PMT_CTL_RES_CLR_WKP_STS_;
4431 buf &= ~PMT_CTL_SUS_MODE_MASK_;
4432 buf |= PMT_CTL_SUS_MODE_3_;
4434 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* Clear latched wake-up status. */
4438 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4442 buf |= PMT_CTL_WUPS_MASK_;
4444 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4451 mutex_unlock(&dev->dev_mutex);
/* lan78xx_submit_deferred_urbs() - resubmit TX URBs deferred while asleep.
 *
 * Drains dev->deferred and submits each URB; on success the skb is put
 * back on dev->txq as in-flight.  Returns whether a bulk-out stall
 * (-EPIPE) was observed so the caller can trigger TX-halt recovery.
 *
 * Called from lan78xx_resume() under dev->txq.lock (see the spin_lock_irq
 * around the call site), hence GFP_ATOMIC.  NOTE(review): listing gaps
 * hide the drop path and where pipe_halted is set — presumably in the
 * -EPIPE branch; confirm against the full source.
 */
4456 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4458 bool pipe_halted = false;
/* Each deferred URB carries its skb in urb->context. */
4461 while ((urb = usb_get_from_anchor(&dev->deferred))) {
4462 struct sk_buff *skb = urb->context;
/* Device gone or carrier down: do not submit this URB. */
4465 if (!netif_device_present(dev->net) ||
4466 !netif_carrier_ok(dev->net) ||
4473 ret = usb_submit_urb(urb, GFP_ATOMIC);
/* Submitted OK: refresh the TX watchdog and track the skb as started. */
4476 netif_trans_update(dev->net);
4477 lan78xx_queue_skb(&dev->txq, skb, tx_start);
/* Endpoint stalled: stop the queue until the halt is cleared. */
4482 if (ret == -EPIPE) {
4483 netif_stop_queue(dev->net);
/* Device disconnected mid-resume. */
4485 } else if (ret == -ENODEV) {
4486 netif_device_detach(dev->net);
/* lan78xx_resume() - USB resume callback (counterpart of lan78xx_suspend).
 *
 * If the interface is open: flush the TX FIFO, restart the interrupt URB,
 * resubmit deferred TX URBs, clear EVENT_DEV_ASLEEP, restart the TX path
 * and the stats timer.  In all cases the wake-source registers are cleared
 * afterwards.
 *
 * NOTE(review): numbered listing with gaps — braces, `else` arms and
 * `if (ret < 0)` checks between the visible lines are not shown.
 */
4494 static int lan78xx_resume(struct usb_interface *intf)
4496 struct lan78xx_net *dev = usb_get_intfdata(intf);
4500 mutex_lock(&dev->dev_mutex);
4502 netif_dbg(dev, ifup, dev->net, "resuming device");
4504 dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4507 bool pipe_halted = false;
4509 ret = lan78xx_flush_tx_fifo(dev);
/* Restart the interrupt endpoint; a failure detaches the netdev. */
4513 if (dev->urb_intr) {
4514 int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4518 netif_device_detach(dev->net);
4520 netdev_warn(dev->net, "Failed to submit intr URB");
/* Flush TX URBs that were deferred during sleep, under the txq lock. */
4524 spin_lock_irq(&dev->txq.lock);
4526 if (netif_device_present(dev->net)) {
4527 pipe_halted = lan78xx_submit_deferred_urbs(dev);
/* A stalled bulk-out pipe is recovered by the TX-halt worker. */
4530 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4533 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4535 spin_unlock_irq(&dev->txq.lock);
/* Restart the queue only when TX is healthy and there is room. */
4538 netif_device_present(dev->net) &&
4539 (skb_queue_len(&dev->txq) < dev->tx_qlen))
4540 netif_start_queue(dev->net);
4542 ret = lan78xx_start_tx_path(dev);
/* Kick the driver bottom half to process any pending work. */
4546 tasklet_schedule(&dev->bh);
/* Re-arm the statistics poll timer stopped by suspend. */
4548 if (!timer_pending(&dev->stat_monitor)) {
4550 mod_timer(&dev->stat_monitor,
4551 jiffies + STAT_UPDATE_TIMER);
4555 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
/* Clear wake sources and the wake-source record register. */
4558 ret = lan78xx_write_reg(dev, WUCSR2, 0);
4561 ret = lan78xx_write_reg(dev, WUCSR, 0);
4564 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
/* Writing the *_RCD_/*_WAKE_/*_FR_ status bits back — presumably
 * write-one-to-clear of latched wake-event status; verify against the
 * LAN78xx datasheet.
 */
4568 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4570 WUCSR2_IPV6_TCPSYN_RCD_ |
4571 WUCSR2_IPV4_TCPSYN_RCD_);
4575 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4576 WUCSR_EEE_RX_WAKE_ |
4578 WUCSR_RFE_WAKE_FR_ |
4587 mutex_unlock(&dev->dev_mutex);
/* lan78xx_reset_resume() - resume after the USB core reset the device.
 *
 * Register state was lost across the reset, so the hardware is fully
 * re-initialized and the PHY restarted before running the normal resume
 * path.  NOTE(review): the error check after lan78xx_reset() falls in a
 * listing gap and is not visible here.
 */
4592 static int lan78xx_reset_resume(struct usb_interface *intf)
4594 struct lan78xx_net *dev = usb_get_intfdata(intf);
4597 netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
/* Full hardware re-init — chip registers do not survive a USB reset. */
4599 ret = lan78xx_reset(dev);
/* Restart the PHY state machine before the common resume path. */
4603 phy_start(dev->net->phydev);
4605 ret = lan78xx_resume(intf);
/* USB vendor/product IDs this driver binds to; exported to userspace and
 * the hotplug machinery via MODULE_DEVICE_TABLE below.
 */
4610 static const struct usb_device_id products[] = {
4612 /* LAN7800 USB Gigabit Ethernet Device */
4613 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4616 /* LAN7850 USB Gigabit Ethernet Device */
4617 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4620 /* LAN7801 USB Gigabit Ethernet Device */
4621 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
/* AT29M2-AF product ID macro is defined in a part of the header cut from
 * this view (vendor ID 0x07C9 is visible in the header).
 */
4624 /* ATM2-AF USB Gigabit Ethernet Device */
4625 USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
4629 MODULE_DEVICE_TABLE(usb, products);
/* USB driver registration: wires probe/disconnect and the three power
 * management callbacks defined above.  module_usb_driver() expands to the
 * module init/exit boilerplate that registers/deregisters the driver.
 */
4631 static struct usb_driver lan78xx_driver = {
4632 .name = DRIVER_NAME,
4633 .id_table = products,
4634 .probe = lan78xx_probe,
4635 .disconnect = lan78xx_disconnect,
4636 .suspend = lan78xx_suspend,
4637 .resume = lan78xx_resume,
4638 .reset_resume = lan78xx_reset_resume,
/* Allow runtime PM autosuspend and opt out of hub-initiated LPM. */
4639 .supports_autosuspend = 1,
4640 .disable_hub_initiated_lpm = 1,
4643 module_usb_driver(lan78xx_driver);
/* Module metadata; the author/description strings come from the #defines
 * at the top of the file.
 */
4645 MODULE_AUTHOR(DRIVER_AUTHOR);
4646 MODULE_DESCRIPTION(DRIVER_DESC);
4647 MODULE_LICENSE("GPL");