2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <net/vxlan.h>
35 #include <linux/interrupt.h>
36 #include <linux/irqdomain.h>
37 #include <linux/irq.h>
38 #include <linux/irqchip/chained_irq.h>
39 #include <linux/microchipphy.h>
40 #include <linux/phy.h>
41 #include <linux/of_net.h>
44 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME "lan78xx"
47 #define DRIVER_VERSION "1.0.6"
49 #define TX_TIMEOUT_JIFFIES (5 * HZ)
50 #define THROTTLE_JIFFIES (HZ / 8)
51 #define UNLINK_TIMEOUT_MS 3
53 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
55 #define SS_USB_PKT_SIZE (1024)
56 #define HS_USB_PKT_SIZE (512)
57 #define FS_USB_PKT_SIZE (64)
59 #define MAX_RX_FIFO_SIZE (12 * 1024)
60 #define MAX_TX_FIFO_SIZE (12 * 1024)
61 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
62 #define DEFAULT_BULK_IN_DELAY (0x0800)
63 #define MAX_SINGLE_PACKET_SIZE (9000)
64 #define DEFAULT_TX_CSUM_ENABLE (true)
65 #define DEFAULT_RX_CSUM_ENABLE (true)
66 #define DEFAULT_TSO_CSUM_ENABLE (true)
67 #define DEFAULT_VLAN_FILTER_ENABLE (true)
68 #define TX_OVERHEAD (8)
71 #define LAN78XX_USB_VENDOR_ID (0x0424)
72 #define LAN7800_USB_PRODUCT_ID (0x7800)
73 #define LAN7850_USB_PRODUCT_ID (0x7850)
74 #define LAN7801_USB_PRODUCT_ID (0x7801)
75 #define LAN78XX_EEPROM_MAGIC (0x78A5)
76 #define LAN78XX_OTP_MAGIC (0x78F3)
81 #define EEPROM_INDICATOR (0xA5)
82 #define EEPROM_MAC_OFFSET (0x01)
83 #define MAX_EEPROM_SIZE 512
84 #define OTP_INDICATOR_1 (0xF3)
85 #define OTP_INDICATOR_2 (0xF7)
87 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
88 WAKE_MCAST | WAKE_BCAST | \
89 WAKE_ARP | WAKE_MAGIC)
91 /* USB related defines */
92 #define BULK_IN_PIPE 1
93 #define BULK_OUT_PIPE 2
95 /* default autosuspend delay (mSec)*/
96 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
98 /* statistic update interval (mSec) */
99 #define STAT_UPDATE_TIMER (1 * 1000)
101 /* defines interrupts from interrupt EP */
102 #define MAX_INT_EP (32)
103 #define INT_EP_INTEP (31)
104 #define INT_EP_OTP_WR_DONE (28)
105 #define INT_EP_EEE_TX_LPI_START (26)
106 #define INT_EP_EEE_TX_LPI_STOP (25)
107 #define INT_EP_EEE_RX_LPI (24)
108 #define INT_EP_MAC_RESET_TIMEOUT (23)
109 #define INT_EP_RDFO (22)
110 #define INT_EP_TXE (21)
111 #define INT_EP_USB_STATUS (20)
112 #define INT_EP_TX_DIS (19)
113 #define INT_EP_RX_DIS (18)
114 #define INT_EP_PHY (17)
115 #define INT_EP_DP (16)
116 #define INT_EP_MAC_ERR (15)
117 #define INT_EP_TDFU (14)
118 #define INT_EP_TDFO (13)
119 #define INT_EP_UTX (12)
120 #define INT_EP_GPIO_11 (11)
121 #define INT_EP_GPIO_10 (10)
122 #define INT_EP_GPIO_9 (9)
123 #define INT_EP_GPIO_8 (8)
124 #define INT_EP_GPIO_7 (7)
125 #define INT_EP_GPIO_6 (6)
126 #define INT_EP_GPIO_5 (5)
127 #define INT_EP_GPIO_4 (4)
128 #define INT_EP_GPIO_3 (3)
129 #define INT_EP_GPIO_2 (2)
130 #define INT_EP_GPIO_1 (1)
131 #define INT_EP_GPIO_0 (0)
/* Statistic names reported to userspace via ethtool -S.
 * Order must match the counter layout of struct lan78xx_statstage /
 * lan78xx_statstage64 below, since lan78xx_get_stats() memcpy()s the
 * whole curr_stat struct into the data array.
 * NOTE(review): several entries (e.g. FCS errors, 64-byte frames, pause
 * frames) and the closing "};" are missing from this extraction — the
 * visible list is shorter than the stats structs; confirm against the
 * full file.
 */
133 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
135 "RX Alignment Errors",
136 "Rx Fragment Errors",
138 "RX Undersize Frame Errors",
139 "RX Oversize Frame Errors",
141 "RX Unicast Byte Count",
142 "RX Broadcast Byte Count",
143 "RX Multicast Byte Count",
145 "RX Broadcast Frames",
146 "RX Multicast Frames",
149 "RX 65 - 127 Byte Frames",
150 "RX 128 - 255 Byte Frames",
151 "RX 256 - 511 Bytes Frames",
152 "RX 512 - 1023 Byte Frames",
153 "RX 1024 - 1518 Byte Frames",
154 "RX Greater 1518 Byte Frames",
155 "EEE RX LPI Transitions",
158 "TX Excess Deferral Errors",
161 "TX Single Collisions",
162 "TX Multiple Collisions",
163 "TX Excessive Collision",
164 "TX Late Collisions",
165 "TX Unicast Byte Count",
166 "TX Broadcast Byte Count",
167 "TX Multicast Byte Count",
169 "TX Broadcast Frames",
170 "TX Multicast Frames",
173 "TX 65 - 127 Byte Frames",
174 "TX 128 - 255 Byte Frames",
175 "TX 256 - 511 Bytes Frames",
176 "TX 512 - 1023 Byte Frames",
177 "TX 1024 - 1518 Byte Frames",
178 "TX Greater 1518 Byte Frames",
179 "EEE TX LPI Transitions",
/* Hardware statistics block as returned by the device's
 * USB_VENDOR_REQUEST_GET_STATS control transfer (see
 * lan78xx_read_stats()).  All counters are 32-bit on the wire and can
 * roll over; lan78xx_check_stat_rollover() tracks wraparounds so the
 * 64-bit shadow copy (struct lan78xx_statstage64) stays monotonic.
 * NOTE(review): a few members (e.g. rx_fcs_errors, rx_pause_frames) and
 * the closing "};" are missing from this extraction.
 */
183 struct lan78xx_statstage {
185 u32 rx_alignment_errors;
186 u32 rx_fragment_errors;
187 u32 rx_jabber_errors;
188 u32 rx_undersize_frame_errors;
189 u32 rx_oversize_frame_errors;
190 u32 rx_dropped_frames;
191 u32 rx_unicast_byte_count;
192 u32 rx_broadcast_byte_count;
193 u32 rx_multicast_byte_count;
194 u32 rx_unicast_frames;
195 u32 rx_broadcast_frames;
196 u32 rx_multicast_frames;
198 u32 rx_64_byte_frames;
199 u32 rx_65_127_byte_frames;
200 u32 rx_128_255_byte_frames;
201 u32 rx_256_511_bytes_frames;
202 u32 rx_512_1023_byte_frames;
203 u32 rx_1024_1518_byte_frames;
204 u32 rx_greater_1518_byte_frames;
205 u32 eee_rx_lpi_transitions;
208 u32 tx_excess_deferral_errors;
209 u32 tx_carrier_errors;
210 u32 tx_bad_byte_count;
211 u32 tx_single_collisions;
212 u32 tx_multiple_collisions;
213 u32 tx_excessive_collision;
214 u32 tx_late_collisions;
215 u32 tx_unicast_byte_count;
216 u32 tx_broadcast_byte_count;
217 u32 tx_multicast_byte_count;
218 u32 tx_unicast_frames;
219 u32 tx_broadcast_frames;
220 u32 tx_multicast_frames;
222 u32 tx_64_byte_frames;
223 u32 tx_65_127_byte_frames;
224 u32 tx_128_255_byte_frames;
225 u32 tx_256_511_bytes_frames;
226 u32 tx_512_1023_byte_frames;
227 u32 tx_1024_1518_byte_frames;
228 u32 tx_greater_1518_byte_frames;
229 u32 eee_tx_lpi_transitions;
/* 64-bit accumulated view of struct lan78xx_statstage.  Must mirror the
 * 32-bit struct member-for-member: lan78xx_update_stats() walks both as
 * flat u32/u64 arrays, computing
 *   curr = raw + rollover_count * (rollover_max + 1)
 * per counter.  This is what lan78xx_get_stats() copies to userspace.
 * NOTE(review): some members and the closing "};" are missing from this
 * extraction, matching the gaps in the 32-bit struct above.
 */
233 struct lan78xx_statstage64 {
235 u64 rx_alignment_errors;
236 u64 rx_fragment_errors;
237 u64 rx_jabber_errors;
238 u64 rx_undersize_frame_errors;
239 u64 rx_oversize_frame_errors;
240 u64 rx_dropped_frames;
241 u64 rx_unicast_byte_count;
242 u64 rx_broadcast_byte_count;
243 u64 rx_multicast_byte_count;
244 u64 rx_unicast_frames;
245 u64 rx_broadcast_frames;
246 u64 rx_multicast_frames;
248 u64 rx_64_byte_frames;
249 u64 rx_65_127_byte_frames;
250 u64 rx_128_255_byte_frames;
251 u64 rx_256_511_bytes_frames;
252 u64 rx_512_1023_byte_frames;
253 u64 rx_1024_1518_byte_frames;
254 u64 rx_greater_1518_byte_frames;
255 u64 eee_rx_lpi_transitions;
258 u64 tx_excess_deferral_errors;
259 u64 tx_carrier_errors;
260 u64 tx_bad_byte_count;
261 u64 tx_single_collisions;
262 u64 tx_multiple_collisions;
263 u64 tx_excessive_collision;
264 u64 tx_late_collisions;
265 u64 tx_unicast_byte_count;
266 u64 tx_broadcast_byte_count;
267 u64 tx_multicast_byte_count;
268 u64 tx_unicast_frames;
269 u64 tx_broadcast_frames;
270 u64 tx_multicast_frames;
272 u64 tx_64_byte_frames;
273 u64 tx_65_127_byte_frames;
274 u64 tx_128_255_byte_frames;
275 u64 tx_256_511_bytes_frames;
276 u64 tx_512_1023_byte_frames;
277 u64 tx_1024_1518_byte_frames;
278 u64 tx_greater_1518_byte_frames;
279 u64 eee_tx_lpi_transitions;
/* Per-device private data, stored in dev->data[0] and shared between the
 * set_multicast/set_vlan deferred-work handlers and the atomic-context
 * code that populates the shadow tables under rfe_ctl_lock.
 * NOTE(review): trailing members (rfe_ctl, wol, ...) and the closing "};"
 * are missing from this extraction — rfe_ctl and wol are referenced by
 * functions below.
 */
285 struct lan78xx_priv {
286 struct lan78xx_net *dev;
288 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
289 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
290 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
291 struct mutex dataport_mutex; /* for dataport access */
292 spinlock_t rfe_ctl_lock; /* for rfe register access */
293 struct work_struct set_multicast;
294 struct work_struct set_vlan;
/* Per-URB bookkeeping overlaid on skb->cb; must fit in sizeof(skb->cb).
 * NOTE(review): the urb/length members and closing "};" are not visible
 * in this extraction.
 */
308 struct skb_data { /* skb->cb is one of these */
310 struct lan78xx_net *dev;
311 enum skb_state state;
317 struct usb_ctrlrequest req;
318 struct lan78xx_net *dev;
321 #define EVENT_TX_HALT 0
322 #define EVENT_RX_HALT 1
323 #define EVENT_RX_MEMORY 2
324 #define EVENT_STS_SPLIT 3
325 #define EVENT_LINK_RESET 4
326 #define EVENT_RX_PAUSED 5
327 #define EVENT_DEV_WAKING 6
328 #define EVENT_DEV_ASLEEP 7
329 #define EVENT_DEV_OPEN 8
330 #define EVENT_STAT_UPDATE 9
333 struct mutex access_lock; /* for stats access */
334 struct lan78xx_statstage saved;
335 struct lan78xx_statstage rollover_count;
336 struct lan78xx_statstage rollover_max;
337 struct lan78xx_statstage64 curr_stat;
340 struct irq_domain_data {
341 struct irq_domain *irqdomain;
343 struct irq_chip *irqchip;
344 irq_flow_handler_t irq_handler;
346 struct mutex irq_lock; /* for irq bus access */
350 struct net_device *net;
351 struct usb_device *udev;
352 struct usb_interface *intf;
357 struct sk_buff_head rxq;
358 struct sk_buff_head txq;
359 struct sk_buff_head done;
360 struct sk_buff_head rxq_pause;
361 struct sk_buff_head txq_pend;
363 struct tasklet_struct bh;
364 struct delayed_work wq;
368 struct urb *urb_intr;
369 struct usb_anchor deferred;
371 struct mutex phy_mutex; /* for phy access */
372 unsigned pipe_in, pipe_out, pipe_intr;
374 u32 hard_mtu; /* count any extra framing */
375 size_t rx_urb_size; /* size for rx urbs */
379 wait_queue_head_t *wait;
380 unsigned char suspend_count;
383 struct timer_list delay;
384 struct timer_list stat_monitor;
386 unsigned long data[5];
393 struct mii_bus *mdiobus;
394 phy_interface_t interface;
397 u8 fc_request_control;
400 struct statstage stats;
402 struct irq_domain_data domain_data;
405 /* define external phy id */
406 #define PHY_LAN8835 (0x0007C130)
407 #define PHY_KSZ9031RNX (0x00221620)
409 /* use ethtool to change the level for any given device */
/* -1 means "use netif_msg_init() defaults"; any other value overrides the
 * NETIF_MSG_* bitmask for all lan78xx devices at module load time.
 */
410 static int msg_level = -1;
411 module_param(msg_level, int, 0);
412 MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read one 32-bit device register at @index over a USB vendor control
 * transfer, storing the CPU-endian result in *@data.
 * The 4-byte buffer is heap-allocated (not stack) because USB transfer
 * buffers must be DMA-able.
 * Returns usb_control_msg()'s result: >= 0 on success, negative errno on
 * failure.
 * NOTE(review): the NULL-check of buf, the le32 conversion/*data store,
 * the kfree() and the return statement are missing from this extraction.
 */
414 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
416 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
422 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
423 USB_VENDOR_REQUEST_READ_REGISTER,
424 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
425 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
426 if (likely(ret >= 0)) {
430 netdev_warn(dev->net,
431 "Failed to read register index 0x%08x. ret = %d",
/* Write the 32-bit value @data to device register @index via a USB vendor
 * control transfer.  As in lan78xx_read_reg(), the transfer buffer is
 * kmalloc'ed so it is DMA-able.
 * Returns usb_control_msg()'s result: >= 0 on success, negative errno on
 * failure.
 * NOTE(review): the buf NULL-check, the cpu_to_le32 store into *buf, the
 * kfree() and return are missing from this extraction.
 */
440 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
442 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
451 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
452 USB_VENDOR_REQUEST_WRITE_REGISTER,
453 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
454 0, index, buf, 4, USB_CTRL_SET_TIMEOUT),
455 if (unlikely(ret < 0)) {
456 netdev_warn(dev->net,
457 "Failed to write register index 0x%08x. ret = %d",
/* Fetch the full hardware statistics block into *@data using the
 * USB_VENDOR_REQUEST_GET_STATS vendor request, converting each 32-bit
 * counter from little-endian to CPU order in place.
 * Returns the usb_control_msg() result (bytes transferred on success,
 * negative errno on failure).
 * NOTE(review): the kmalloc NULL-check, the src/data pointer setup, the
 * copy into *data, kfree() and return are missing from this extraction.
 */
466 static int lan78xx_read_stats(struct lan78xx_net *dev,
467 struct lan78xx_statstage *data)
471 struct lan78xx_statstage *stats;
475 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
479 ret = usb_control_msg(dev->udev,
480 usb_rcvctrlpipe(dev->udev, 0),
481 USB_VENDOR_REQUEST_GET_STATS,
482 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
487 USB_CTRL_SET_TIMEOUT);
488 if (likely(ret >= 0)) {
491 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
492 le32_to_cpus(&src[i]);
496 netdev_warn(dev->net,
497 "Failed to read stat ret = %d", ret);
/* If the freshly-read 32-bit counter is smaller than the last saved
 * snapshot, the hardware counter wrapped: bump the per-member rollover
 * count so lan78xx_update_stats() can reconstruct a 64-bit total.
 * NOTE(review): the macro's closing "}" line is missing from this
 * extraction.
 */
505 #define check_counter_rollover(struct1, dev_stats, member) { \
506 if (struct1->member < dev_stats.saved.member) \
507 dev_stats.rollover_count.member++; \
/* Compare every counter in the new hardware snapshot @stats against the
 * previously saved snapshot, recording 32-bit wraparounds via
 * check_counter_rollover(), then save @stats as the new baseline.
 * Called with dev->stats.access_lock held (see lan78xx_update_stats()).
 * NOTE(review): the opening "{" of the body is missing from this
 * extraction.
 */
510 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
511 struct lan78xx_statstage *stats)
513 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
514 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
515 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
516 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
517 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
518 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
519 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
520 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
521 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
522 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
523 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
524 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
525 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
526 check_counter_rollover(stats, dev->stats, rx_pause_frames);
527 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
528 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
529 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
530 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
531 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
532 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
533 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
534 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
535 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
536 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
537 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
538 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
539 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
540 check_counter_rollover(stats, dev->stats, tx_single_collisions);
541 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
542 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
543 check_counter_rollover(stats, dev->stats, tx_late_collisions);
544 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
545 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
546 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
547 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
548 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
549 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
550 check_counter_rollover(stats, dev->stats, tx_pause_frames);
551 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
552 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
553 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
554 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
555 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
556 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
557 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
558 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
559 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
/* New snapshot becomes the baseline for the next rollover check. */
561 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
/* Refresh dev->stats.curr_stat from hardware.
 * Reads the raw 32-bit counters, updates rollover bookkeeping, then
 * rebuilds each 64-bit counter as raw + rollover_count * (max + 1) by
 * walking the parallel structs as flat u32/u64 arrays (relies on
 * lan78xx_statstage and lan78xx_statstage64 having identical member
 * order).  Holds stats.access_lock around the update and takes a
 * runtime-PM reference for the USB transfers; bails out silently if the
 * device cannot be resumed.
 */
564 static void lan78xx_update_stats(struct lan78xx_net *dev)
566 u32 *p, *count, *max;
569 struct lan78xx_statstage lan78xx_stats;
571 if (usb_autopm_get_interface(dev->intf) < 0)
574 p = (u32 *)&lan78xx_stats;
575 count = (u32 *)&dev->stats.rollover_count;
576 max = (u32 *)&dev->stats.rollover_max;
577 data = (u64 *)&dev->stats.curr_stat;
579 mutex_lock(&dev->stats.access_lock);
/* read_stats returns bytes transferred; only fold in a successful read */
581 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
582 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
584 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
585 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
587 mutex_unlock(&dev->stats.access_lock);
589 usb_autopm_put_interface(dev->intf);
592 /* Loop until the read is completed with timeout called with phy_mutex held */
/* Polls MII_ACC until the MII_BUSY bit clears, giving up after one
 * second (HZ jiffies).  Returns 0 when idle, a negative errno on
 * register-read failure.
 * NOTE(review): the "do {" opener and the timeout/return lines are
 * missing from this extraction.
 */
593 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
595 unsigned long start_time = jiffies;
600 ret = lan78xx_read_reg(dev, MII_ACC, &val);
601 if (unlikely(ret < 0))
604 if (!(val & MII_ACC_MII_BUSY_))
606 } while (!time_after(jiffies, start_time + HZ));
/* Build an MII_ACC register value for PHY @id, register @index.
 * @read selects the READ or WRITE command bit; BUSY is always set so
 * writing the value kicks off the transaction.
 * NOTE(review): the "if (read)"/"else" lines and the final return are
 * missing from this extraction.
 */
611 static inline u32 mii_access(int id, int index, int read)
615 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
616 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
618 ret |= MII_ACC_MII_READ_;
620 ret |= MII_ACC_MII_WRITE_;
621 ret |= MII_ACC_MII_BUSY_;
/* Wait (up to one second) for an in-flight EEPROM command to finish.
 * Polls E2P_CMD until BUSY clears or the controller flags TIMEOUT,
 * sleeping 40-100us between reads.  Warns and fails if the command never
 * completed.
 * NOTE(review): the "do {" opener and the success/error return lines are
 * missing from this extraction.
 */
626 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
628 unsigned long start_time = jiffies;
633 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
634 if (unlikely(ret < 0))
637 if (!(val & E2P_CMD_EPC_BUSY_) ||
638 (val & E2P_CMD_EPC_TIMEOUT_))
640 usleep_range(40, 100);
641 } while (!time_after(jiffies, start_time + HZ));
643 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
644 netdev_warn(dev->net, "EEPROM read operation timeout");
/* Confirm the EEPROM controller is idle before issuing a new command.
 * Same one-second E2P_CMD polling loop as lan78xx_wait_eeprom(), but
 * only checks the BUSY bit.  Warns and fails on timeout.
 * NOTE(review): the "do {" opener and the return statements are missing
 * from this extraction.
 */
651 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
653 unsigned long start_time = jiffies;
658 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
659 if (unlikely(ret < 0))
662 if (!(val & E2P_CMD_EPC_BUSY_))
665 usleep_range(40, 100);
666 } while (!time_after(jiffies, start_time + HZ));
668 netdev_warn(dev->net, "EEPROM is busy");
/* Read @length bytes from the external EEPROM starting at @offset into
 * @data, one byte per E2P READ command.
 * On LAN7800 the EEPROM pins are muxed with the LED pins, so the LED
 * outputs are disabled for the duration and the saved HW_CFG value is
 * restored on exit.
 * NOTE(review): the local declarations (saved, retval, i), the save of
 * HW_CFG into 'saved', several error-path gotos and the final return are
 * missing from this extraction.
 */
672 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
673 u32 length, u8 *data)
680 /* depends on chip, some EEPROM pins are muxed with LED function.
681 * disable & restore LED function to access EEPROM.
683 ret = lan78xx_read_reg(dev, HW_CFG, &val);
685 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
686 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
687 ret = lan78xx_write_reg(dev, HW_CFG, val);
690 retval = lan78xx_eeprom_confirm_not_busy(dev);
694 for (i = 0; i < length; i++) {
/* issue one-byte READ at offset+i, wait for completion, fetch data */
695 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
696 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
697 ret = lan78xx_write_reg(dev, E2P_CMD, val);
698 if (unlikely(ret < 0)) {
703 retval = lan78xx_wait_eeprom(dev);
707 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
708 if (unlikely(ret < 0)) {
/* only the low byte of E2P_DATA is the EEPROM byte */
713 data[i] = val & 0xFF;
/* restore original LED configuration on LAN7800 */
719 if (dev->chipid == ID_REV_CHIP_ID_7800_)
720 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Validated EEPROM read: only proceeds if byte 0 carries the
 * EEPROM_INDICATOR (0xA5) signature proving a programmed EEPROM is
 * attached.
 * NOTE(review): the "else" branch (returning an error when the signature
 * is absent) and the final return are missing from this extraction.
 */
725 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
726 u32 length, u8 *data)
731 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
732 if ((ret == 0) && (sig == EEPROM_INDICATOR))
733 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
/* Write @length bytes from @data to the external EEPROM at @offset.
 * Sequence: disable LED mux on LAN7800, confirm controller idle, issue
 * EWEN (write enable), then for each byte load E2P_DATA and issue a
 * WRITE command, waiting for completion after every step.  The saved
 * HW_CFG value is restored on exit.
 * NOTE(review): local declarations, the HW_CFG save, the per-byte data
 * value setup at original line 776, error-path gotos and the final
 * return are missing from this extraction.
 */
740 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
741 u32 length, u8 *data)
748 /* depends on chip, some EEPROM pins are muxed with LED function.
749 * disable & restore LED function to access EEPROM.
751 ret = lan78xx_read_reg(dev, HW_CFG, &val);
753 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
754 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
755 ret = lan78xx_write_reg(dev, HW_CFG, val);
758 retval = lan78xx_eeprom_confirm_not_busy(dev);
762 /* Issue write/erase enable command */
763 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
764 ret = lan78xx_write_reg(dev, E2P_CMD, val);
765 if (unlikely(ret < 0)) {
770 retval = lan78xx_wait_eeprom(dev);
774 for (i = 0; i < length; i++) {
775 /* Fill data register */
777 ret = lan78xx_write_reg(dev, E2P_DATA, val);
783 /* Send "write" command */
784 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
785 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
786 ret = lan78xx_write_reg(dev, E2P_CMD, val);
792 retval = lan78xx_wait_eeprom(dev);
/* restore original LED configuration on LAN7800 */
801 if (dev->chipid == ID_REV_CHIP_ID_7800_)
802 ret = lan78xx_write_reg(dev, HW_CFG, saved);
/* Read @length bytes from on-die OTP memory at @offset into @data.
 * First powers the OTP block up if OTP_PWR_DN indicates it is powered
 * down (polling with a one-second timeout), then for each byte programs
 * the split address registers (high bits in OTP_ADDR1, low in
 * OTP_ADDR2), issues a READ + GO, polls OTP_STATUS until not busy, and
 * reads the byte from OTP_RD_DATA.
 * NOTE(review): local declarations, the timeout error returns and the
 * final return are missing from this extraction.
 */
807 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
808 u32 length, u8 *data)
813 unsigned long timeout;
815 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
817 if (buf & OTP_PWR_DN_PWRDN_N_) {
818 /* clear it and wait to be cleared */
819 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
821 timeout = jiffies + HZ;
824 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
825 if (time_after(jiffies, timeout)) {
826 netdev_warn(dev->net,
827 "timeout on OTP_PWR_DN");
830 } while (buf & OTP_PWR_DN_PWRDN_N_);
833 for (i = 0; i < length; i++) {
834 ret = lan78xx_write_reg(dev, OTP_ADDR1,
835 ((offset + i) >> 8) & OTP_ADDR1_15_11);
836 ret = lan78xx_write_reg(dev, OTP_ADDR2,
837 ((offset + i) & OTP_ADDR2_10_3));
839 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
840 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
842 timeout = jiffies + HZ;
845 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
846 if (time_after(jiffies, timeout)) {
847 netdev_warn(dev->net,
848 "timeout on OTP_STATUS");
851 } while (buf & OTP_STATUS_BUSY_);
853 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
855 data[i] = (u8)(buf & 0xFF);
/* Program @length bytes from @data into on-die OTP memory at @offset.
 * Mirrors lan78xx_read_raw_otp(): power up the OTP block if needed, then
 * select BYTE program mode and, per byte, set the split address, load
 * OTP_PRGM_DATA, issue a program-and-verify command + GO, and poll
 * OTP_STATUS until idle.  OTP bits are one-time programmable — callers
 * must gate this behind the OTP_INDICATOR_1 signature check.
 * NOTE(review): local declarations, the timeout error returns and the
 * final return are missing from this extraction.
 */
861 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
862 u32 length, u8 *data)
867 unsigned long timeout;
869 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
871 if (buf & OTP_PWR_DN_PWRDN_N_) {
872 /* clear it and wait to be cleared */
873 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
875 timeout = jiffies + HZ;
878 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
879 if (time_after(jiffies, timeout)) {
880 netdev_warn(dev->net,
881 "timeout on OTP_PWR_DN completion");
884 } while (buf & OTP_PWR_DN_PWRDN_N_);
887 /* set to BYTE program mode */
888 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
890 for (i = 0; i < length; i++) {
891 ret = lan78xx_write_reg(dev, OTP_ADDR1,
892 ((offset + i) >> 8) & OTP_ADDR1_15_11);
893 ret = lan78xx_write_reg(dev, OTP_ADDR2,
894 ((offset + i) & OTP_ADDR2_10_3));
895 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
896 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
897 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
899 timeout = jiffies + HZ;
902 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
903 if (time_after(jiffies, timeout)) {
904 netdev_warn(dev->net,
905 "Timeout on OTP_STATUS completion");
908 } while (buf & OTP_STATUS_BUSY_);
/* Validated OTP read.  Byte 0 must carry one of the two OTP signatures:
 * OTP_INDICATOR_2 (0xF7) selects the second OTP image (offset is
 * adjusted — that line is not visible here), OTP_INDICATOR_1 (0xF3) the
 * first; anything else means unprogrammed OTP and the read is rejected.
 * NOTE(review): the offset adjustment for INDICATOR_2, the error return
 * for an invalid signature, and the final return are missing from this
 * extraction.
 */
914 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
915 u32 length, u8 *data)
920 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
923 if (sig == OTP_INDICATOR_2)
925 else if (sig != OTP_INDICATOR_1)
928 ret = lan78xx_read_raw_otp(dev, offset, length, data);
/* Poll DP_SEL up to 100 times (40-100us apart) until the dataport
 * reports ready (DP_SEL_DPRDY_ set).  Returns 0 on ready, negative errno
 * on register-read failure, and a timeout error after the warning below.
 * NOTE(review): the loop's success return and the final error return are
 * missing from this extraction.
 */
934 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
938 for (i = 0; i < 100; i++) {
941 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
942 if (unlikely(ret < 0))
945 if (dp_sel & DP_SEL_DPRDY_)
948 usleep_range(40, 100);
951 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
/* Write @length 32-bit words from @buf into internal dataport RAM
 * (@ram_select chooses the RAM bank, e.g. the VLAN/DA hash table) at
 * word address @addr.  Serialized by pdata->dataport_mutex; takes a
 * runtime-PM reference for the duration.  Each word write is
 * DP_ADDR/DP_DATA/DP_CMD followed by a busy-wait.
 * NOTE(review): error-path gotos after the wait calls and the final
 * return are missing from this extraction.
 */
956 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
957 u32 addr, u32 length, u32 *buf)
959 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
963 if (usb_autopm_get_interface(dev->intf) < 0)
966 mutex_lock(&pdata->dataport_mutex);
968 ret = lan78xx_dataport_wait_not_busy(dev);
/* select the target RAM bank, preserving other DP_SEL bits */
972 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
974 dp_sel &= ~DP_SEL_RSEL_MASK_;
975 dp_sel |= ram_select;
976 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
978 for (i = 0; i < length; i++) {
979 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
981 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
983 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
985 ret = lan78xx_dataport_wait_not_busy(dev);
991 mutex_unlock(&pdata->dataport_mutex);
992 usb_autopm_put_interface(dev->intf);
/* Stage MAC address @addr into shadow perfect-filter slot @index.
 * Slot 0 is reserved for the device's own address, hence index > 0.
 * pfilter_table[index][1] holds MAF_LO (addr bytes 3..0 packed
 * little-to-big), pfilter_table[index][0] holds MAF_HI (bytes 5..4 plus
 * the VALID and TYPE_DST flag bits).  Only the shadow table is touched;
 * the deferred-work handler writes it to hardware.
 * NOTE(review): the initial loads of addr[3]/addr[5] into temp are
 * missing from this extraction.
 */
997 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
998 int index, u8 addr[ETH_ALEN])
1002 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1004 temp = addr[2] | (temp << 8);
1005 temp = addr[1] | (temp << 8);
1006 temp = addr[0] | (temp << 8);
1007 pdata->pfilter_table[index][1] = temp;
1009 temp = addr[4] | (temp << 8);
1010 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1011 pdata->pfilter_table[index][0] = temp;
1015 /* returns hash bit number for given MAC address */
/* Top 9 bits of the Ethernet CRC select one of 512 bits in the
 * 16-word multicast hash table (mchash_table).
 */
1016 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1018 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
/* Workqueue handler (pdata->set_multicast): flush the shadow multicast
 * state built by lan78xx_set_multicast() to hardware.  Runs in process
 * context because the USB register writes sleep.  Writes the 512-bit
 * hash table through the dataport, reloads every perfect-filter slot
 * (clearing MAF_HI first so the slot is never half-valid), then commits
 * the staged RFE_CTL value.
 */
1021 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1023 struct lan78xx_priv *pdata =
1024 container_of(param, struct lan78xx_priv, set_multicast);
1025 struct lan78xx_net *dev = pdata->dev;
1029 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1032 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1033 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
/* slot 0 holds the device's own MAC, so start at 1 */
1035 for (i = 1; i < NUM_OF_MAF; i++) {
1036 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1037 ret = lan78xx_write_reg(dev, MAF_LO(i),
1038 pdata->pfilter_table[i][1]);
1039 ret = lan78xx_write_reg(dev, MAF_HI(i),
1040 pdata->pfilter_table[i][0]);
1043 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
/* ndo_set_rx_mode callback.  May be called in atomic context, so it only
 * rebuilds the shadow filter state (rfe_ctl bits, hash table, perfect
 * filters) under the rfe_ctl_lock spinlock and defers the sleeping USB
 * register writes to lan78xx_deferred_multicast_write() via
 * schedule_work().
 * Policy: promiscuous > allmulti > per-address filtering; the first
 * multicast addresses go into perfect-filter slots, overflow falls back
 * to the 512-bit hash (the slot-count check around original line 1090 is
 * not visible in this extraction).
 */
1046 static void lan78xx_set_multicast(struct net_device *netdev)
1048 struct lan78xx_net *dev = netdev_priv(netdev);
1049 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1050 unsigned long flags;
1053 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
/* start from a clean slate: no unicast/multicast passes by default */
1055 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1056 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1058 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1059 pdata->mchash_table[i] = 0;
1060 /* pfilter_table[0] has own HW address */
1061 for (i = 1; i < NUM_OF_MAF; i++) {
1062 pdata->pfilter_table[i][0] =
1063 pdata->pfilter_table[i][1] = 0;
/* broadcast is always accepted */
1066 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1068 if (dev->net->flags & IFF_PROMISC) {
1069 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1070 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1072 if (dev->net->flags & IFF_ALLMULTI) {
1073 netif_dbg(dev, drv, dev->net,
1074 "receive all multicast enabled");
1075 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1079 if (netdev_mc_count(dev->net)) {
1080 struct netdev_hw_addr *ha;
1083 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1085 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1088 netdev_for_each_mc_addr(ha, netdev) {
1089 /* set first 32 into Perfect Filter */
1091 lan78xx_set_addr_filter(pdata, i, ha->addr);
/* overflow addresses go into the hash table instead */
1093 u32 bitnum = lan78xx_hash(ha->addr);
1095 pdata->mchash_table[bitnum / 32] |=
1096 (1 << (bitnum % 32));
1097 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1103 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1105 /* defer register writes to a sleepable context */
1106 schedule_work(&pdata->set_multicast);
/* Program MAC flow control after a link change.
 * @lcladv/@rmtadv are the MII advertisement registers; when flow-control
 * autoneg is on the resolved capability comes from
 * mii_resolve_flowctrl_fdx(), otherwise the user-requested
 * fc_request_control is used.  TX pause enables FLOW_CR_TX_FCEN_ plus a
 * 0xFFFF pause-time; FCT_FLOW thresholds are speed-dependent (the SS/HS
 * fct_flow assignments around original lines 1128-1130 are not visible
 * in this extraction) and must be written before enabling flow in FLOW.
 */
1109 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1110 u16 lcladv, u16 rmtadv)
1112 u32 flow = 0, fct_flow = 0;
1116 if (dev->fc_autoneg)
1117 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1119 cap = dev->fc_request_control;
1121 if (cap & FLOW_CTRL_TX)
1122 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1124 if (cap & FLOW_CTRL_RX)
1125 flow |= FLOW_CR_RX_FCEN_;
1127 if (dev->udev->speed == USB_SPEED_SUPER)
1129 else if (dev->udev->speed == USB_SPEED_HIGH)
1132 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1133 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1134 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1136 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1138 /* threshold value should be set before enabling flow */
1139 ret = lan78xx_write_reg(dev, FLOW, flow);
/* Handle a PHY link-state change (invoked from the EVENT_LINK_RESET
 * deferred work).  Acks the PHY interrupt, samples link state under the
 * phydev lock, and on link-down resets the MAC (MAC_CR read/modify/write
 * — the modify line is not visible here) and stops the stats timer; on
 * link-up it tunes USB3 U1/U2 link power management for the negotiated
 * speed (U2 off at 1G, both on otherwise), resolves pause from the MII
 * advertisement registers, programs flow control, restarts the stats
 * timer, and kicks the RX/TX tasklet.
 */
1144 static int lan78xx_link_reset(struct lan78xx_net *dev)
1146 struct phy_device *phydev = dev->net->phydev;
1147 struct ethtool_link_ksettings ecmd;
1148 int ladv, radv, ret, link;
1151 /* clear LAN78xx interrupt status */
1152 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1153 if (unlikely(ret < 0))
1156 mutex_lock(&phydev->lock);
1157 phy_read_status(phydev);
1158 link = phydev->link;
1159 mutex_unlock(&phydev->lock);
1161 if (!link && dev->link_on) {
1162 dev->link_on = false;
/* reset MAC on link drop */
1165 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1166 if (unlikely(ret < 0))
1169 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1170 if (unlikely(ret < 0))
1173 del_timer(&dev->stat_monitor);
1174 } else if (link && !dev->link_on) {
1175 dev->link_on = true;
1177 phy_ethtool_ksettings_get(phydev, &ecmd);
1179 if (dev->udev->speed == USB_SPEED_SUPER) {
1180 if (ecmd.base.speed == 1000) {
/* at gigabit, disable U2 (deep LPM) but allow U1 */
1182 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1183 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1184 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1186 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1187 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1188 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1190 /* enable U1 & U2 */
1191 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1192 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1193 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1194 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1198 ladv = phy_read(phydev, MII_ADVERTISE);
1202 radv = phy_read(phydev, MII_LPA);
1206 netif_dbg(dev, link, dev->net,
1207 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1208 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1210 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1213 if (!timer_pending(&dev->stat_monitor)) {
1215 mod_timer(&dev->stat_monitor,
1216 jiffies + STAT_UPDATE_TIMER);
1219 tasklet_schedule(&dev->bh);
1225 /* some work can't be done in tasklets, so we use keventd
1227 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1228 * but tasklet_schedule() doesn't. hope the failure is rare.
/* Record event bit @work (EVENT_*) in dev->flags and schedule the
 * deferred-work handler to process it in sleepable context.  The set_bit
 * survives even if scheduling fails, so the event is only logged as
 * possibly dropped, not lost from the flag word.
 */
1230 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1232 set_bit(work, &dev->flags);
1233 if (!schedule_delayed_work(&dev->wq, 0))
1234 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
/* Interrupt-endpoint URB completion handler.  The device delivers a
 * 4-byte little-endian bitmap of INT_EP_* sources; PHY interrupts defer
 * a link reset via keventd and are forwarded to the nested PHY irq
 * domain when one is registered.  Other bits are unexpected and only
 * logged.  Runs in URB-completion (interrupt) context — no sleeping.
 * NOTE(review): the early "return" after the short-URB warning and the
 * "} else {" before the unexpected-interrupt warning are missing from
 * this extraction.
 */
1237 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1241 if (urb->actual_length != 4) {
1242 netdev_warn(dev->net,
1243 "unexpected urb length %d", urb->actual_length);
1247 memcpy(&intdata, urb->transfer_buffer, 4);
1248 le32_to_cpus(&intdata);
1250 if (intdata & INT_ENP_PHY_INT) {
1251 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1252 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1254 if (dev->domain_data.phyirq > 0)
1255 generic_handle_irq(dev->domain_data.phyirq);
1257 netdev_warn(dev->net,
1258 "unexpected interrupt: 0x%08x\n", intdata);
/* ethtool get_eeprom_len: the EEPROM size exposed to userspace is fixed
 * at MAX_EEPROM_SIZE (512 bytes).
 */
1261 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1263 return MAX_EEPROM_SIZE;
/* ethtool get_eeprom: raw EEPROM dump of ee->len bytes at ee->offset.
 * Takes a runtime-PM reference around the USB accesses and stamps
 * ee->magic so userspace can hand the blob back to set_eeprom.
 * NOTE(review): the early return on autopm failure and the final
 * "return ret" are missing from this extraction.
 */
1266 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1267 struct ethtool_eeprom *ee, u8 *data)
1269 struct lan78xx_net *dev = netdev_priv(netdev);
1272 ret = usb_autopm_get_interface(dev->intf);
1276 ee->magic = LAN78XX_EEPROM_MAGIC;
1278 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1280 usb_autopm_put_interface(dev->intf);
/* ethtool set_eeprom: magic selects the target.  LAN78XX_EEPROM_MAGIC
 * writes the external EEPROM anywhere; LAN78XX_OTP_MAGIC writes OTP, but
 * only full-image writes starting at offset 0 whose first byte is the
 * OTP_INDICATOR_1 signature (OTP is one-time programmable, so the guard
 * is deliberate).  Anything else falls through with ret unchanged.
 * Runtime-PM reference held around the device access.
 * NOTE(review): the early return on autopm failure and the final
 * "return ret" are missing from this extraction.
 */
1285 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1286 struct ethtool_eeprom *ee, u8 *data)
1288 struct lan78xx_net *dev = netdev_priv(netdev);
1291 ret = usb_autopm_get_interface(dev->intf);
1295 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1296 * to load data from EEPROM
1298 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1299 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1300 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1301 (ee->offset == 0) &&
1303 (data[0] == OTP_INDICATOR_1))
1304 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1306 usb_autopm_put_interface(dev->intf);
/* ethtool get_strings: copy the whole stat-name table for ETH_SS_STATS;
 * other string sets are ignored.
 */
1311 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1314 if (stringset == ETH_SS_STATS)
1315 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
/* ethtool get_sset_count: number of stat strings for ETH_SS_STATS.
 * NOTE(review): the else-branch return (-EOPNOTSUPP for other sets) is
 * missing from this extraction.
 */
1318 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1320 if (sset == ETH_SS_STATS)
1321 return ARRAY_SIZE(lan78xx_gstrings);
/* ethtool get_ethtool_stats: refresh the 64-bit counters from hardware,
 * then copy curr_stat to userspace under the stats mutex.  Relies on
 * curr_stat's member order matching lan78xx_gstrings.
 */
1326 static void lan78xx_get_stats(struct net_device *netdev,
1327 struct ethtool_stats *stats, u64 *data)
1329 struct lan78xx_net *dev = netdev_priv(netdev);
1331 lan78xx_update_stats(dev);
1333 mutex_lock(&dev->stats.access_lock);
1334 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1335 mutex_unlock(&dev->stats.access_lock);
/* ethtool get_wol: report Wake-on-LAN capability and current settings.
 * WoL is only supported when USB_CFG0 indicates remote wakeup
 * (USB_CFG_RMT_WKP_); otherwise the else-branch (not visible here)
 * reports nothing supported.  Also forwards to the PHY's WoL query —
 * that call sits in the lines missing between L749 and L750.  Takes a
 * runtime-PM reference for the register read.
 */
1338 static void lan78xx_get_wol(struct net_device *netdev,
1339 struct ethtool_wolinfo *wol)
1341 struct lan78xx_net *dev = netdev_priv(netdev);
1344 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1346 if (usb_autopm_get_interface(dev->intf) < 0)
1349 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1350 if (unlikely(ret < 0)) {
1354 if (buf & USB_CFG_RMT_WKP_) {
1355 wol->supported = WAKE_ALL;
1356 wol->wolopts = pdata->wol;
1363 usb_autopm_put_interface(dev->intf);
1366 static int lan78xx_set_wol(struct net_device *netdev,
1367 struct ethtool_wolinfo *wol)
1369 struct lan78xx_net *dev = netdev_priv(netdev);
1370 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1373 ret = usb_autopm_get_interface(dev->intf);
1377 if (wol->wolopts & ~WAKE_ALL)
1380 pdata->wol = wol->wolopts;
1382 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1384 phy_ethtool_set_wol(netdev->phydev, wol);
1386 usb_autopm_put_interface(dev->intf);
1391 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1393 struct lan78xx_net *dev = netdev_priv(net);
1394 struct phy_device *phydev = net->phydev;
1398 ret = usb_autopm_get_interface(dev->intf);
1402 ret = phy_ethtool_get_eee(phydev, edata);
1406 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1407 if (buf & MAC_CR_EEE_EN_) {
1408 edata->eee_enabled = true;
1409 edata->eee_active = !!(edata->advertised &
1410 edata->lp_advertised);
1411 edata->tx_lpi_enabled = true;
1412 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1413 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1414 edata->tx_lpi_timer = buf;
1416 edata->eee_enabled = false;
1417 edata->eee_active = false;
1418 edata->tx_lpi_enabled = false;
1419 edata->tx_lpi_timer = 0;
1424 usb_autopm_put_interface(dev->intf);
1429 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1431 struct lan78xx_net *dev = netdev_priv(net);
1435 ret = usb_autopm_get_interface(dev->intf);
1439 if (edata->eee_enabled) {
1440 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1441 buf |= MAC_CR_EEE_EN_;
1442 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1444 phy_ethtool_set_eee(net->phydev, edata);
1446 buf = (u32)edata->tx_lpi_timer;
1447 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1449 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1450 buf &= ~MAC_CR_EEE_EN_;
1451 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1454 usb_autopm_put_interface(dev->intf);
1459 static u32 lan78xx_get_link(struct net_device *net)
1463 mutex_lock(&net->phydev->lock);
1464 phy_read_status(net->phydev);
1465 link = net->phydev->link;
1466 mutex_unlock(&net->phydev->lock);
1471 static void lan78xx_get_drvinfo(struct net_device *net,
1472 struct ethtool_drvinfo *info)
1474 struct lan78xx_net *dev = netdev_priv(net);
1476 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1477 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1478 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1481 static u32 lan78xx_get_msglevel(struct net_device *net)
1483 struct lan78xx_net *dev = netdev_priv(net);
1485 return dev->msg_enable;
1488 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1490 struct lan78xx_net *dev = netdev_priv(net);
1492 dev->msg_enable = level;
1495 static int lan78xx_get_link_ksettings(struct net_device *net,
1496 struct ethtool_link_ksettings *cmd)
1498 struct lan78xx_net *dev = netdev_priv(net);
1499 struct phy_device *phydev = net->phydev;
1502 ret = usb_autopm_get_interface(dev->intf);
1506 phy_ethtool_ksettings_get(phydev, cmd);
1508 usb_autopm_put_interface(dev->intf);
1513 static int lan78xx_set_link_ksettings(struct net_device *net,
1514 const struct ethtool_link_ksettings *cmd)
1516 struct lan78xx_net *dev = netdev_priv(net);
1517 struct phy_device *phydev = net->phydev;
1521 ret = usb_autopm_get_interface(dev->intf);
1525 /* change speed & duplex */
1526 ret = phy_ethtool_ksettings_set(phydev, cmd);
1528 if (!cmd->base.autoneg) {
1529 /* force link down */
1530 temp = phy_read(phydev, MII_BMCR);
1531 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1533 phy_write(phydev, MII_BMCR, temp);
1536 usb_autopm_put_interface(dev->intf);
1541 static void lan78xx_get_pause(struct net_device *net,
1542 struct ethtool_pauseparam *pause)
1544 struct lan78xx_net *dev = netdev_priv(net);
1545 struct phy_device *phydev = net->phydev;
1546 struct ethtool_link_ksettings ecmd;
1548 phy_ethtool_ksettings_get(phydev, &ecmd);
1550 pause->autoneg = dev->fc_autoneg;
1552 if (dev->fc_request_control & FLOW_CTRL_TX)
1553 pause->tx_pause = 1;
1555 if (dev->fc_request_control & FLOW_CTRL_RX)
1556 pause->rx_pause = 1;
1559 static int lan78xx_set_pause(struct net_device *net,
1560 struct ethtool_pauseparam *pause)
1562 struct lan78xx_net *dev = netdev_priv(net);
1563 struct phy_device *phydev = net->phydev;
1564 struct ethtool_link_ksettings ecmd;
1567 phy_ethtool_ksettings_get(phydev, &ecmd);
1569 if (pause->autoneg && !ecmd.base.autoneg) {
1574 dev->fc_request_control = 0;
1575 if (pause->rx_pause)
1576 dev->fc_request_control |= FLOW_CTRL_RX;
1578 if (pause->tx_pause)
1579 dev->fc_request_control |= FLOW_CTRL_TX;
1581 if (ecmd.base.autoneg) {
1585 ethtool_convert_link_mode_to_legacy_u32(
1586 &advertising, ecmd.link_modes.advertising);
1588 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1589 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1590 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1592 ethtool_convert_legacy_u32_to_link_mode(
1593 ecmd.link_modes.advertising, advertising);
1595 phy_ethtool_ksettings_set(phydev, &ecmd);
1598 dev->fc_autoneg = pause->autoneg;
/* ethtool operations table; nway_reset is serviced directly by phylib. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
1627 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1629 if (!netif_running(netdev))
1632 return phy_mii_ioctl(netdev->phydev, rq, cmd);
1635 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1637 u32 addr_lo, addr_hi;
1641 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1642 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1644 addr[0] = addr_lo & 0xFF;
1645 addr[1] = (addr_lo >> 8) & 0xFF;
1646 addr[2] = (addr_lo >> 16) & 0xFF;
1647 addr[3] = (addr_lo >> 24) & 0xFF;
1648 addr[4] = addr_hi & 0xFF;
1649 addr[5] = (addr_hi >> 8) & 0xFF;
1651 if (!is_valid_ether_addr(addr)) {
1652 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1653 /* valid address present in Device Tree */
1654 netif_dbg(dev, ifup, dev->net,
1655 "MAC address read from Device Tree");
1656 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1657 ETH_ALEN, addr) == 0) ||
1658 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1659 ETH_ALEN, addr) == 0)) &&
1660 is_valid_ether_addr(addr)) {
1661 /* eeprom values are valid so use them */
1662 netif_dbg(dev, ifup, dev->net,
1663 "MAC address read from EEPROM");
1665 /* generate random MAC */
1666 random_ether_addr(addr);
1667 netif_dbg(dev, ifup, dev->net,
1668 "MAC address set to random addr");
1671 addr_lo = addr[0] | (addr[1] << 8) |
1672 (addr[2] << 16) | (addr[3] << 24);
1673 addr_hi = addr[4] | (addr[5] << 8);
1675 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1676 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1679 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1680 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1682 ether_addr_copy(dev->net->dev_addr, addr);
1685 /* MDIO read and write wrappers for phylib */
1686 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1688 struct lan78xx_net *dev = bus->priv;
1692 ret = usb_autopm_get_interface(dev->intf);
1696 mutex_lock(&dev->phy_mutex);
1698 /* confirm MII not busy */
1699 ret = lan78xx_phy_wait_not_busy(dev);
1703 /* set the address, index & direction (read from PHY) */
1704 addr = mii_access(phy_id, idx, MII_READ);
1705 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1707 ret = lan78xx_phy_wait_not_busy(dev);
1711 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1713 ret = (int)(val & 0xFFFF);
1716 mutex_unlock(&dev->phy_mutex);
1717 usb_autopm_put_interface(dev->intf);
1722 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1725 struct lan78xx_net *dev = bus->priv;
1729 ret = usb_autopm_get_interface(dev->intf);
1733 mutex_lock(&dev->phy_mutex);
1735 /* confirm MII not busy */
1736 ret = lan78xx_phy_wait_not_busy(dev);
1741 ret = lan78xx_write_reg(dev, MII_DATA, val);
1743 /* set the address, index & direction (write to PHY) */
1744 addr = mii_access(phy_id, idx, MII_WRITE);
1745 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1747 ret = lan78xx_phy_wait_not_busy(dev);
1752 mutex_unlock(&dev->phy_mutex);
1753 usb_autopm_put_interface(dev->intf);
1757 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1761 dev->mdiobus = mdiobus_alloc();
1762 if (!dev->mdiobus) {
1763 netdev_err(dev->net, "can't allocate MDIO bus\n");
1767 dev->mdiobus->priv = (void *)dev;
1768 dev->mdiobus->read = lan78xx_mdiobus_read;
1769 dev->mdiobus->write = lan78xx_mdiobus_write;
1770 dev->mdiobus->name = "lan78xx-mdiobus";
1771 dev->mdiobus->parent = &dev->udev->dev;
1773 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1774 dev->udev->bus->busnum, dev->udev->devnum);
1776 switch (dev->chipid) {
1777 case ID_REV_CHIP_ID_7800_:
1778 case ID_REV_CHIP_ID_7850_:
1779 /* set to internal PHY id */
1780 dev->mdiobus->phy_mask = ~(1 << 1);
1782 case ID_REV_CHIP_ID_7801_:
1783 /* scan thru PHYAD[2..0] */
1784 dev->mdiobus->phy_mask = ~(0xFF);
1788 ret = mdiobus_register(dev->mdiobus);
1790 netdev_err(dev->net, "can't register MDIO bus\n");
1794 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1797 mdiobus_free(dev->mdiobus);
1801 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1803 mdiobus_unregister(dev->mdiobus);
1804 mdiobus_free(dev->mdiobus);
/* phylib link-change callback.
 * Applies a hardware workaround: in forced 100 Mb/s mode the chip can
 * latch the wrong mode after a cable swap, so speed is bounced through
 * 10 Mb/s. PHY interrupts are masked around the bounce and any interrupt
 * raised by it is discarded before re-enabling.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1839 static int irq_map(struct irq_domain *d, unsigned int irq,
1840 irq_hw_number_t hwirq)
1842 struct irq_domain_data *data = d->host_data;
1844 irq_set_chip_data(irq, data);
1845 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1846 irq_set_noprobe(irq);
1851 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1853 irq_set_chip_and_handler(irq, NULL, NULL);
1854 irq_set_chip_data(irq, NULL);
/* irq_domain callbacks used by lan78xx_setup_irq_domain(). */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1862 static void lan78xx_irq_mask(struct irq_data *irqd)
1864 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1866 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1869 static void lan78xx_irq_unmask(struct irq_data *irqd)
1871 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1873 data->irqenable |= BIT(irqd_to_hwirq(irqd));
1876 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1878 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1880 mutex_lock(&data->irq_lock);
1883 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1885 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1886 struct lan78xx_net *dev =
1887 container_of(data, struct lan78xx_net, domain_data);
1891 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1892 * are only two callbacks executed in non-atomic contex.
1894 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1895 if (buf != data->irqenable)
1896 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1898 mutex_unlock(&data->irq_lock);
/* irqchip backing the device's interrupt-endpoint domain; mask state is
 * cached and synced to hardware in the bus_lock/bus_sync_unlock pair.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1909 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1911 struct device_node *of_node;
1912 struct irq_domain *irqdomain;
1913 unsigned int irqmap = 0;
1917 of_node = dev->udev->dev.parent->of_node;
1919 mutex_init(&dev->domain_data.irq_lock);
1921 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1922 dev->domain_data.irqenable = buf;
1924 dev->domain_data.irqchip = &lan78xx_irqchip;
1925 dev->domain_data.irq_handler = handle_simple_irq;
1927 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1928 &chip_domain_ops, &dev->domain_data);
1930 /* create mapping for PHY interrupt */
1931 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1933 irq_domain_remove(irqdomain);
1942 dev->domain_data.irqdomain = irqdomain;
1943 dev->domain_data.phyirq = irqmap;
1948 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1950 if (dev->domain_data.phyirq > 0) {
1951 irq_dispose_mapping(dev->domain_data.phyirq);
1953 if (dev->domain_data.irqdomain)
1954 irq_domain_remove(dev->domain_data.irqdomain);
1956 dev->domain_data.phyirq = 0;
1957 dev->domain_data.irqdomain = NULL;
1960 static int lan8835_fixup(struct phy_device *phydev)
1964 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1966 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1967 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1970 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1972 /* RGMII MAC TXC Delay Enable */
1973 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1974 MAC_RGMII_ID_TXC_DELAY_EN_);
1976 /* RGMII TX DLL Tune Adjust */
1977 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1979 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1984 static int ksz9031rnx_fixup(struct phy_device *phydev)
1986 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1988 /* Micrel9301RNX PHY configuration */
1989 /* RGMII Control Signal Pad Skew */
1990 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
1991 /* RGMII RX Data Pad Skew */
1992 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
1993 /* RGMII RX Clock Pad Skew */
1994 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
1996 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
/* Locate and attach the PHY.
 * LAN7800/7850 use the internal GMII PHY; LAN7801 attaches an external
 * RGMII PHY and registers per-model fixups (KSZ9031RNX, LAN8835).
 * Flow-control advertisement is forced to RX+TX, 1000T-Half is removed
 * (unsupported by the MAC), and the interrupt mode falls back to polling
 * when no phyirq mapping exists.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	u32 mii_adv;
	struct phy_device *phydev = dev->net->phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_err(dev->net, "no PHY found\n");
		return -EIO;
	}

	if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
	    (dev->chipid == ID_REV_CHIP_ID_7850_)) {
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;

	} else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return -EIO;
		}

		dev->interface = PHY_INTERFACE_MODE_RGMII;

		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "fail to register fixup\n");
			return ret;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "fail to register fixup\n");
			return ret;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	} else {
		netdev_err(dev->net, "unknown ID found\n");
		ret = -EIO;
		goto error;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;

error:
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	return ret;
}
2090 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2096 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2098 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2101 buf &= ~MAC_RX_RXEN_;
2102 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2105 /* add 4 to size for FCS */
2106 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2107 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2109 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2112 buf |= MAC_RX_RXEN_;
2113 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* Asynchronously unlink every not-yet-unlinked URB on queue q.
 * Returns the number of successful unlink requests. The queue lock is
 * dropped around usb_unlink_urb() (which may sleep/complete inline), and
 * the URB is pinned with usb_get_urb() across that window.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the first entry not already being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2164 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2166 struct lan78xx_net *dev = netdev_priv(netdev);
2167 int ll_mtu = new_mtu + netdev->hard_header_len;
2168 int old_hard_mtu = dev->hard_mtu;
2169 int old_rx_urb_size = dev->rx_urb_size;
2172 /* no second zero-length packet read wanted after mtu-sized packets */
2173 if ((ll_mtu % dev->maxpacket) == 0)
2176 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2178 netdev->mtu = new_mtu;
2180 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2181 if (dev->rx_urb_size == old_hard_mtu) {
2182 dev->rx_urb_size = dev->hard_mtu;
2183 if (dev->rx_urb_size > old_rx_urb_size) {
2184 if (netif_running(dev->net)) {
2185 unlink_urbs(dev, &dev->rxq);
2186 tasklet_schedule(&dev->bh);
2194 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2196 struct lan78xx_net *dev = netdev_priv(netdev);
2197 struct sockaddr *addr = p;
2198 u32 addr_lo, addr_hi;
2201 if (netif_running(netdev))
2204 if (!is_valid_ether_addr(addr->sa_data))
2205 return -EADDRNOTAVAIL;
2207 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2209 addr_lo = netdev->dev_addr[0] |
2210 netdev->dev_addr[1] << 8 |
2211 netdev->dev_addr[2] << 16 |
2212 netdev->dev_addr[3] << 24;
2213 addr_hi = netdev->dev_addr[4] |
2214 netdev->dev_addr[5] << 8;
2216 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2217 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2219 /* Added to support MAC address changes */
2220 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2221 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2226 /* Enable or disable Rx checksum offload engine */
2227 static int lan78xx_set_features(struct net_device *netdev,
2228 netdev_features_t features)
2230 struct lan78xx_net *dev = netdev_priv(netdev);
2231 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2232 unsigned long flags;
2235 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2237 if (features & NETIF_F_RXCSUM) {
2238 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2239 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2241 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2242 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2245 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2246 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2248 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2250 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2252 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2257 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2259 struct lan78xx_priv *pdata =
2260 container_of(param, struct lan78xx_priv, set_vlan);
2261 struct lan78xx_net *dev = pdata->dev;
2263 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2264 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2267 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2268 __be16 proto, u16 vid)
2270 struct lan78xx_net *dev = netdev_priv(netdev);
2271 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2273 u16 vid_dword_index;
2275 vid_dword_index = (vid >> 5) & 0x7F;
2276 vid_bit_index = vid & 0x1F;
2278 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2280 /* defer register writes to a sleepable context */
2281 schedule_work(&pdata->set_vlan);
2286 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2287 __be16 proto, u16 vid)
2289 struct lan78xx_net *dev = netdev_priv(netdev);
2290 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2292 u16 vid_dword_index;
2294 vid_dword_index = (vid >> 5) & 0x7F;
2295 vid_bit_index = vid & 0x1F;
2297 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2299 /* defer register writes to a sleepable context */
2300 schedule_work(&pdata->set_vlan);
/* Initialize USB Latency Tolerance Messaging registers.
 * If LTM is enabled in USB_CFG1, a 24-byte LTM table may be stored in
 * EEPROM (preferred) or OTP: offset 0x3F holds {length, pointer/2}.
 * Absent or invalid data leaves regs[] zeroed, which disables LTM.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
/* Full device initialization after (re)connect:
 * lite-reset the chip, restore the MAC address, cache chip id/rev, set
 * USB burst/delay parameters by link speed, size the FIFOs, reset the
 * PHY, and finally enable the MAC/FCT TX and RX paths.
 * Both polling loops bail out with -EIO after ~1s (HZ jiffies).
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* issue a "lite" reset and wait for the self-clearing bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* burst cap and queue depths scale with the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable MAC and FCT transmit paths */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable MAC and FCT receive paths */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2482 static void lan78xx_init_stats(struct lan78xx_net *dev)
2487 /* initialize for stats update
2488 * some counters are 20bits and some are 32bits
2490 p = (u32 *)&dev->stats.rollover_max;
2491 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2494 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2495 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2496 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2497 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2498 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2499 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2500 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2501 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2502 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2503 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2505 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2508 static int lan78xx_open(struct net_device *net)
2510 struct lan78xx_net *dev = netdev_priv(net);
2513 ret = usb_autopm_get_interface(dev->intf);
2517 phy_start(net->phydev);
2519 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2521 /* for Link Check */
2522 if (dev->urb_intr) {
2523 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2525 netif_err(dev, ifup, dev->net,
2526 "intr submit %d\n", ret);
2531 lan78xx_init_stats(dev);
2533 set_bit(EVENT_DEV_OPEN, &dev->flags);
2535 netif_start_queue(net);
2537 dev->link_on = false;
2539 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2541 usb_autopm_put_interface(dev->intf);
2547 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2549 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2550 DECLARE_WAITQUEUE(wait, current);
2553 /* ensure there are no more active urbs */
2554 add_wait_queue(&unlink_wakeup, &wait);
2555 set_current_state(TASK_UNINTERRUPTIBLE);
2556 dev->wait = &unlink_wakeup;
2557 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2559 /* maybe wait for deletions to finish. */
2560 while (!skb_queue_empty(&dev->rxq) &&
2561 !skb_queue_empty(&dev->txq) &&
2562 !skb_queue_empty(&dev->done)) {
2563 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2564 set_current_state(TASK_UNINTERRUPTIBLE);
2565 netif_dbg(dev, ifdown, dev->net,
2566 "waited for %d urb completions\n", temp);
2568 set_current_state(TASK_RUNNING);
2570 remove_wait_queue(&unlink_wakeup, &wait);
2573 static int lan78xx_stop(struct net_device *net)
2575 struct lan78xx_net *dev = netdev_priv(net);
2577 if (timer_pending(&dev->stat_monitor))
2578 del_timer_sync(&dev->stat_monitor);
2581 phy_stop(net->phydev);
2583 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2584 netif_stop_queue(net);
2586 netif_info(dev, ifdown, dev->net,
2587 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2588 net->stats.rx_packets, net->stats.tx_packets,
2589 net->stats.rx_errors, net->stats.tx_errors);
2591 lan78xx_terminate_urbs(dev);
2593 usb_kill_urb(dev->urb_intr);
2595 skb_queue_purge(&dev->rxq_pause);
2597 /* deferred work (task, timer, softirq) must also stop.
2598 * can't flush_scheduled_work() until we drop rtnl (later),
2599 * else workers could deadlock; so make workers a NOP.
2602 cancel_delayed_work_sync(&dev->wq);
2603 tasklet_kill(&dev->bh);
2605 usb_autopm_put_interface(dev->intf);
2610 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2611 struct sk_buff *skb, gfp_t flags)
2613 u32 tx_cmd_a, tx_cmd_b;
2615 if (skb_cow_head(skb, TX_OVERHEAD)) {
2616 dev_kfree_skb_any(skb);
2620 if (skb_linearize(skb)) {
2621 dev_kfree_skb_any(skb);
2625 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2627 if (skb->ip_summed == CHECKSUM_PARTIAL)
2628 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2631 if (skb_is_gso(skb)) {
2632 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2634 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2636 tx_cmd_a |= TX_CMD_A_LSO_;
2639 if (skb_vlan_tag_present(skb)) {
2640 tx_cmd_a |= TX_CMD_A_IVTG_;
2641 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2645 cpu_to_le32s(&tx_cmd_b);
2646 memcpy(skb->data, &tx_cmd_b, 4);
2649 cpu_to_le32s(&tx_cmd_a);
2650 memcpy(skb->data, &tx_cmd_a, 4);
/* Move a completed skb from its active queue to dev->done and schedule
 * the bottom half on the first insertion. Note the asymmetric locking:
 * irqs stay disabled across the handoff from list->lock to done.lock,
 * with flags restored only on the final unlock. Returns the entry's
 * previous state so callers can detect unlink races.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
/* Bulk-out URB completion handler (interrupt context).
 * Accounts TX stats (or errors), reacts to the URB status, drops the
 * async autopm reference taken at submit time, and defers the skb to
 * the done queue for the bottom half to clean up.
 */
2678 static void tx_complete(struct urb *urb)
2680 struct sk_buff *skb = (struct sk_buff *)urb->context;
2681 struct skb_data *entry = (struct skb_data *)skb->cb;
2682 struct lan78xx_net *dev = entry->dev;
2684 if (urb->status == 0) {
/* one URB may carry several aggregated packets */
2685 dev->net->stats.tx_packets += entry->num_of_packet;
2686 dev->net->stats.tx_bytes += entry->length;
2688 dev->net->stats.tx_errors++;
2690 switch (urb->status) {
/* endpoint stalled: let the kevent worker clear the halt */
2692 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2695 /* software-driven interface shutdown */
2703 netif_stop_queue(dev->net);
2706 netif_dbg(dev, tx_err, dev->net,
2707 "tx err %d\n", entry->urb->status);
2712 usb_autopm_put_interface_async(dev->intf);
/* hand the skb to lan78xx_bh() for final cleanup */
2714 defer_bh(dev, skb, &dev->txq, tx_done);
/* Append @newsk to @list and record its lifecycle @state in the skb's
 * control block.  Caller must hold the list lock where required.
 */
2717 static void lan78xx_queue_skb(struct sk_buff_head *list,
2718 struct sk_buff *newsk, enum skb_state state)
2720 struct skb_data *entry = (struct skb_data *)newsk->cb;
2722 __skb_queue_tail(list, newsk);
2723 entry->state = state;
/* ndo_start_xmit: prepend the TX command header and queue the skb on
 * txq_pend; actual USB submission happens in the bottom half
 * (lan78xx_tx_bh).  Always returns NETDEV_TX_OK — failed prep is
 * accounted as a drop rather than requeued.
 */
2727 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2729 struct lan78xx_net *dev = netdev_priv(net);
2730 struct sk_buff *skb2 = NULL;
2733 skb_tx_timestamp(skb);
2734 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2738 skb_queue_tail(&dev->txq_pend, skb2);
2740 /* throttle TX patch at slower than SUPER SPEED USB */
2741 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2742 (skb_queue_len(&dev->txq_pend) > 10))
2743 netif_stop_queue(net);
/* tx_prep returned NULL: skb already freed, count as error+drop */
2745 netif_dbg(dev, tx_err, dev->net,
2746 "lan78xx_tx_prep return NULL\n");
2747 dev->net->stats.tx_errors++;
2748 dev->net->stats.tx_dropped++;
/* kick the bottom half to push pending packets out */
2751 tasklet_schedule(&dev->bh);
2753 return NETDEV_TX_OK;
/* Bind-time initialization: allocate the private data, set up locks and
 * deferred-work items, choose the default netdev feature set, create
 * the IRQ domain, reset the chip and initialize MDIO.  The tail lines
 * are the error-unwind path (cancel works, free pdata — free elided
 * in this excerpt).
 */
2756 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2758 struct lan78xx_priv *pdata = NULL;
/* pdata pointer is stashed as an unsigned long in dev->data[0] */
2762 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2764 pdata = (struct lan78xx_priv *)(dev->data[0]);
2766 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2772 spin_lock_init(&pdata->rfe_ctl_lock);
2773 mutex_init(&pdata->dataport_mutex);
/* multicast filter updates are written from process context */
2775 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2777 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2778 pdata->vlan_table[i] = 0;
2780 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
/* build the default feature set from the driver's compile-time knobs */
2782 dev->net->features = 0;
2784 if (DEFAULT_TX_CSUM_ENABLE)
2785 dev->net->features |= NETIF_F_HW_CSUM;
2787 if (DEFAULT_RX_CSUM_ENABLE)
2788 dev->net->features |= NETIF_F_RXCSUM;
2790 if (DEFAULT_TSO_CSUM_ENABLE)
2791 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2793 dev->net->hw_features = dev->net->features;
2795 ret = lan78xx_setup_irq_domain(dev);
2797 netdev_warn(dev->net,
2798 "lan78xx_setup_irq_domain() failed : %d", ret);
/* account for the 8-byte TX command header in MTU math */
2802 dev->net->hard_header_len += TX_OVERHEAD;
2803 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2805 /* Init all registers */
2806 ret = lan78xx_reset(dev);
2808 netdev_warn(dev->net, "Registers INIT FAILED....");
2812 ret = lan78xx_mdio_init(dev);
2814 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2818 dev->net->flags |= IFF_MULTICAST;
/* default Wake-on-LAN policy */
2820 pdata->wol = WAKE_MAGIC;
/* ---- error unwind ---- */
2825 lan78xx_remove_irq_domain(dev);
2828 netdev_warn(dev->net, "Bind routine FAILED");
2829 cancel_work_sync(&pdata->set_multicast);
2830 cancel_work_sync(&pdata->set_vlan);
/* Reverse of lan78xx_bind(): tear down the IRQ domain and MDIO bus and
 * cancel the deferred multicast/VLAN work before pdata is released
 * (kfree elided in this excerpt).
 */
2835 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2837 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2839 lan78xx_remove_irq_domain(dev);
2841 lan78xx_remove_mdio(dev);
2844 cancel_work_sync(&pdata->set_multicast);
2845 cancel_work_sync(&pdata->set_vlan);
2846 netif_dbg(dev, ifdown, dev->net, "free pdata");
/* Translate the hardware RX checksum result into skb metadata.
 * If RXCSUM is off or the hardware flagged an ignored/invalid checksum
 * (RX_CMD_A_ICSM_), fall back to CHECKSUM_NONE; otherwise use the raw
 * 16-bit checksum carried in the top of rx_cmd_b as CHECKSUM_COMPLETE.
 */
2853 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2854 struct sk_buff *skb,
2855 u32 rx_cmd_a, u32 rx_cmd_b)
2857 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2858 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2859 skb->ip_summed = CHECKSUM_NONE;
2861 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2862 skb->ip_summed = CHECKSUM_COMPLETE;
/* Hand a fully-parsed RX skb to the network stack (or park it on
 * rxq_pause while reception is paused).  Updates RX stats, resolves
 * the protocol and passes the skb via netif_rx().
 */
2866 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
/* RX paused: queue for later replay instead of delivering now */
2870 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2871 skb_queue_tail(&dev->rxq_pause, skb);
2875 dev->net->stats.rx_packets++;
2876 dev->net->stats.rx_bytes += skb->len;
2878 skb->protocol = eth_type_trans(skb, dev->net);
2880 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2881 skb->len + sizeof(struct ethhdr), skb->protocol);
/* clear driver-private cb state before the stack sees the skb */
2882 memset(skb->cb, 0, sizeof(struct skb_data));
2884 if (skb_defer_rx_timestamp(skb))
2887 status = netif_rx(skb);
2888 if (status != NET_RX_SUCCESS)
2889 netif_dbg(dev, rx_err, dev->net,
2890 "netif_rx status %d\n", status);
/* Parse one bulk-in URB buffer, which may batch several Ethernet frames.
 * Each frame is preceded by three little-endian command words
 * (rx_cmd_a/b/c).  The last frame reuses @skb directly; earlier frames
 * are delivered via a clone.  Frames are padded to 4-byte alignment.
 * Returns nonzero on success, 0 on malformed input (per the elided
 * return lines — confirm against the full source).
 */
2893 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2895 if (skb->len < dev->net->hard_header_len)
2898 while (skb->len > 0) {
2899 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2901 struct sk_buff *skb2;
2902 unsigned char *packet;
/* pull the three per-frame command words off the front */
2904 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2905 le32_to_cpus(&rx_cmd_a);
2906 skb_pull(skb, sizeof(rx_cmd_a));
2908 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2909 le32_to_cpus(&rx_cmd_b);
2910 skb_pull(skb, sizeof(rx_cmd_b));
2912 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2913 le16_to_cpus(&rx_cmd_c);
2914 skb_pull(skb, sizeof(rx_cmd_c));
2918 /* get the packet length */
2919 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2920 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
/* hardware receive-error flag: log and skip this frame */
2922 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2923 netif_dbg(dev, rx_err, dev->net,
2924 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2926 /* last frame in this batch */
2927 if (skb->len == size) {
2928 lan78xx_rx_csum_offload(dev, skb,
2929 rx_cmd_a, rx_cmd_b);
2931 skb_trim(skb, skb->len - 4); /* remove fcs */
2932 skb->truesize = size + sizeof(struct sk_buff);
/* not the last frame: clone and carve this frame out of the batch */
2937 skb2 = skb_clone(skb, GFP_ATOMIC);
2938 if (unlikely(!skb2)) {
2939 netdev_warn(dev->net, "Error allocating skb");
2944 skb2->data = packet;
2945 skb_set_tail_pointer(skb2, size);
2947 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2949 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2950 skb2->truesize = size + sizeof(struct sk_buff);
2952 lan78xx_skb_return(dev, skb2);
2955 skb_pull(skb, size);
2957 /* padding bytes before the next frame starts */
2959 skb_pull(skb, align_count);
/* Bottom-half RX dispatch: run the frame parser; on parse failure count
 * an RX error.  A successfully parsed non-empty skb is delivered via
 * lan78xx_skb_return(); otherwise it is dropped back onto the done
 * queue for cleanup (control flow partially elided in this excerpt).
 */
2965 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2967 if (!lan78xx_rx(dev, skb)) {
2968 dev->net->stats.rx_errors++;
2973 lan78xx_skb_return(dev, skb);
2977 netif_dbg(dev, rx_err, dev->net, "drop\n");
2978 dev->net->stats.rx_errors++;
/* requeue on done so the bh frees the skb/urb pair */
2980 skb_queue_tail(&dev->done, skb);
2983 static void rx_complete(struct urb *urb);
/* Allocate an rx_urb_size skb, bind it to @urb and submit the bulk-in
 * URB, provided the device is present, running and not halted/asleep.
 * On any failure the skb is freed; specific errors defer recovery to
 * the kevent worker (EVENT_RX_HALT) or detach the device.
 */
2985 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2987 struct sk_buff *skb;
2988 struct skb_data *entry;
2989 unsigned long lockflags;
2990 size_t size = dev->rx_urb_size;
2993 skb = netdev_alloc_skb_ip_align(dev->net, size);
2999 entry = (struct skb_data *)skb->cb;
3004 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3005 skb->data, size, rx_complete, skb);
3007 spin_lock_irqsave(&dev->rxq.lock, lockflags);
/* only submit while the interface is fully up and awake */
3009 if (netif_device_present(dev->net) &&
3010 netif_running(dev->net) &&
3011 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3012 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3013 ret = usb_submit_urb(urb, GFP_ATOMIC);
/* success: track the skb on rxq in rx_start state */
3016 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
/* -EPIPE: endpoint stall — have the worker clear the halt */
3019 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3022 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3023 netif_device_detach(dev->net);
3029 netif_dbg(dev, rx_err, dev->net,
3030 "rx submit, %d\n", ret);
3031 tasklet_schedule(&dev->bh);
3034 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3037 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
/* failed path: release the skb that never made it onto rxq */
3039 dev_kfree_skb_any(skb);
/* Bulk-in URB completion handler (interrupt context).
 * Classifies the URB status into a next skb state, accounts errors,
 * defers the skb to the done queue, and resubmits the URB when the
 * interface is still running and not halted.
 */
3045 static void rx_complete(struct urb *urb)
3047 struct sk_buff *skb = (struct sk_buff *)urb->context;
3048 struct skb_data *entry = (struct skb_data *)skb->cb;
3049 struct lan78xx_net *dev = entry->dev;
3050 int urb_status = urb->status;
3051 enum skb_state state;
/* reflect the actual number of bytes the device wrote */
3053 skb_put(skb, urb->actual_length);
3057 switch (urb_status) {
/* success, but runt buffers are counted as length errors */
3059 if (skb->len < dev->net->hard_header_len) {
3061 dev->net->stats.rx_errors++;
3062 dev->net->stats.rx_length_errors++;
3063 netif_dbg(dev, rx_err, dev->net,
3064 "rx length %d\n", skb->len);
3066 usb_mark_last_busy(dev->udev);
/* stall: recovery is deferred to the kevent worker */
3069 dev->net->stats.rx_errors++;
3070 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3072 case -ECONNRESET: /* async unlink */
3073 case -ESHUTDOWN: /* hardware gone */
3074 netif_dbg(dev, ifdown, dev->net,
3075 "rx shutdown, code %d\n", urb_status);
3083 dev->net->stats.rx_errors++;
3089 /* data overrun ... flush fifo? */
3091 dev->net->stats.rx_over_errors++;
3096 dev->net->stats.rx_errors++;
3097 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
/* hand skb to the bh; returned old state gates the resubmit below */
3101 state = defer_bh(dev, skb, &dev->rxq, state);
3104 if (netif_running(dev->net) &&
3105 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3106 state != unlink_start) {
3107 rx_submit(dev, urb, GFP_ATOMIC);
3112 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
/* TX bottom half: coalesce pending skbs from txq_pend into a single
 * transfer buffer (GSO skbs go alone), allocate and fill a bulk-out
 * URB, and submit it under the txq lock.  Handles autosuspend deferral
 * (anchor on dev->deferred), endpoint stalls and submit errors.
 * NOTE(review): several branch/label lines are elided in this excerpt.
 */
3115 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3118 struct urb *urb = NULL;
3119 struct skb_data *entry;
3120 unsigned long flags;
3121 struct sk_buff_head *tqp = &dev->txq_pend;
3122 struct sk_buff *skb, *skb2;
3125 int skb_totallen, pkt_cnt;
/* pass 1: walk txq_pend to decide how many packets to aggregate */
3131 spin_lock_irqsave(&tqp->lock, flags);
3132 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3133 if (skb_is_gso(skb)) {
3135 /* handle previous packets first */
/* a GSO skb is sent on its own, unlinked directly here */
3139 length = skb->len - TX_OVERHEAD;
3140 __skb_unlink(skb, tqp);
3141 spin_unlock_irqrestore(&tqp->lock, flags);
/* stop aggregating once the single-transfer limit would be exceeded */
3145 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3147 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3150 spin_unlock_irqrestore(&tqp->lock, flags);
3152 /* copy to a single skb */
3153 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3157 skb_put(skb, skb_totallen);
/* pass 2: dequeue and pack each pending skb at 4-byte alignment */
3159 for (count = pos = 0; count < pkt_cnt; count++) {
3160 skb2 = skb_dequeue(tqp);
3162 length += (skb2->len - TX_OVERHEAD);
3163 memcpy(skb->data + pos, skb2->data, skb2->len);
3164 pos += roundup(skb2->len, sizeof(u32));
3165 dev_kfree_skb(skb2);
3170 urb = usb_alloc_urb(0, GFP_ATOMIC);
3174 entry = (struct skb_data *)skb->cb;
/* record payload length / packet count for tx_complete() accounting */
3177 entry->length = length;
3178 entry->num_of_packet = count;
3180 spin_lock_irqsave(&dev->txq.lock, flags);
3181 ret = usb_autopm_get_interface_async(dev->intf);
3183 spin_unlock_irqrestore(&dev->txq.lock, flags);
3187 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3188 skb->data, skb->len, tx_complete, skb);
3190 if (length % dev->maxpacket == 0) {
3191 /* send USB_ZERO_PACKET */
3192 urb->transfer_flags |= URB_ZERO_PACKET;
3196 /* if this triggers the device is still a sleep */
3197 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3198 /* transmission will be done in resume */
3199 usb_anchor_urb(urb, &dev->deferred);
3200 /* no use to process more packets */
3201 netif_stop_queue(dev->net);
3203 spin_unlock_irqrestore(&dev->txq.lock, flags);
3204 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3209 ret = usb_submit_urb(urb, GFP_ATOMIC);
/* success: timestamp, track on txq, apply queue backpressure */
3212 netif_trans_update(dev->net);
3213 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3214 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3215 netif_stop_queue(dev->net);
/* -EPIPE: stall — stop the queue and let the worker clear the halt */
3218 netif_stop_queue(dev->net);
3219 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3220 usb_autopm_put_interface_async(dev->intf);
/* other submit error: drop the autopm ref and log */
3223 usb_autopm_put_interface_async(dev->intf);
3224 netif_dbg(dev, tx_err, dev->net,
3225 "tx: submit urb err %d\n", ret);
3229 spin_unlock_irqrestore(&dev->txq.lock, flags);
/* drop path: account and free the coalesced skb */
3232 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3234 dev->net->stats.tx_dropped++;
3236 dev_kfree_skb_any(skb);
3239 netif_dbg(dev, tx_queued, dev->net,
3240 "> tx, len %d, type 0x%x\n", length, skb->protocol);
/* RX bottom half: top up the rxq with fresh bulk-in URBs (up to 10 per
 * pass), reschedule the bh if the queue is still short, and wake the
 * TX queue when there is room for more pending transmissions.
 */
3243 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3248 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3249 for (i = 0; i < 10; i++) {
3250 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3252 urb = usb_alloc_urb(0, GFP_ATOMIC);
/* -ENOLINK from rx_submit means device gone: stop refilling */
3254 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3258 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3259 tasklet_schedule(&dev->bh);
3261 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3262 netif_wake_queue(dev->net);
/* Main bottom-half tasklet: drain dev->done, dispatching each skb by
 * its recorded state (rx_done -> process, cleanup states -> free urb),
 * then refresh the stats timer and drive TX/RX bottom halves while the
 * device is present and running.
 */
3265 static void lan78xx_bh(unsigned long param)
3267 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3268 struct sk_buff *skb;
3269 struct skb_data *entry;
3271 while ((skb = skb_dequeue(&dev->done))) {
3272 entry = (struct skb_data *)(skb->cb);
3273 switch (entry->state) {
/* completed RX: parse/deliver, then mark for cleanup */
3275 entry->state = rx_cleanup;
3276 rx_process(dev, skb);
3279 usb_free_urb(entry->urb);
3283 usb_free_urb(entry->urb);
/* unexpected state: log only (skb handling elided here) */
3287 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3292 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3293 /* reset update timer delta */
3294 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3296 mod_timer(&dev->stat_monitor,
3297 jiffies + STAT_UPDATE_TIMER);
3300 if (!skb_queue_empty(&dev->txq_pend))
/* only refill RX while no throttle timer or halt is pending */
3303 if (!timer_pending(&dev->delay) &&
3304 !test_bit(EVENT_RX_HALT, &dev->flags))
/* Deferred-event worker: runs in process context so it can use the
 * synchronous USB API.  Handles TX/RX endpoint-halt recovery
 * (usb_clear_halt), link reset and periodic statistics updates, each
 * gated by its EVENT_* flag bit.
 */
3309 static void lan78xx_delayedwork(struct work_struct *work)
3312 struct lan78xx_net *dev;
3314 dev = container_of(work, struct lan78xx_net, wq.work);
/* TX endpoint stalled: unlink URBs then clear the halt */
3316 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3317 unlink_urbs(dev, &dev->txq);
3318 status = usb_autopm_get_interface(dev->intf);
3321 status = usb_clear_halt(dev->udev, dev->pipe_out);
3322 usb_autopm_put_interface(dev->intf);
3325 status != -ESHUTDOWN) {
3326 if (netif_msg_tx_err(dev))
3328 netdev_err(dev->net,
3329 "can't clear tx halt, status %d\n",
3332 clear_bit(EVENT_TX_HALT, &dev->flags);
3333 if (status != -ESHUTDOWN)
3334 netif_wake_queue(dev->net);
/* RX endpoint stalled: same recovery for the IN pipe */
3337 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3338 unlink_urbs(dev, &dev->rxq);
3339 status = usb_autopm_get_interface(dev->intf);
3342 status = usb_clear_halt(dev->udev, dev->pipe_in);
3343 usb_autopm_put_interface(dev->intf);
3346 status != -ESHUTDOWN) {
3347 if (netif_msg_rx_err(dev))
3349 netdev_err(dev->net,
3350 "can't clear rx halt, status %d\n",
3353 clear_bit(EVENT_RX_HALT, &dev->flags);
3354 tasklet_schedule(&dev->bh);
/* link change reported by the interrupt endpoint */
3358 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3361 clear_bit(EVENT_LINK_RESET, &dev->flags);
3362 status = usb_autopm_get_interface(dev->intf);
3365 if (lan78xx_link_reset(dev) < 0) {
3366 usb_autopm_put_interface(dev->intf);
3368 netdev_info(dev->net, "link reset failed (%d)\n",
3371 usb_autopm_put_interface(dev->intf);
/* periodic stats refresh; back off the timer exponentially (cap 50) */
3375 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3376 lan78xx_update_stats(dev);
3378 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3380 mod_timer(&dev->stat_monitor,
3381 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3383 dev->delta = min((dev->delta * 2), 50);
/* Interrupt-endpoint URB completion: feed status data to
 * lan78xx_status() on success, swallow shutdown codes, and resubmit
 * the interrupt URB while the interface is running.
 */
3387 static void intr_complete(struct urb *urb)
3389 struct lan78xx_net *dev = urb->context;
3390 int status = urb->status;
3395 lan78xx_status(dev, urb);
3398 /* software-driven interface shutdown */
3399 case -ENOENT: /* urb killed */
3400 case -ESHUTDOWN: /* hardware gone */
3401 netif_dbg(dev, ifdown, dev->net,
3402 "intr shutdown, code %d\n", status);
3405 /* NOTE: not throttling like RX/TX, since this endpoint
3406 * already polls infrequently
3409 netdev_dbg(dev->net, "intr status %d\n", status);
3413 if (!netif_running(dev->net))
/* clear the buffer before reuse so stale status isn't re-read */
3416 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3417 status = usb_submit_urb(urb, GFP_ATOMIC);
3419 netif_err(dev, timer, dev->net,
3420 "intr resubmit --> %d\n", status);
/* USB disconnect callback: unregister PHY fixups, detach the PHY,
 * unregister the netdev, stop deferred work and anchored URBs, unbind
 * and release the interrupt URB.  (free_netdev/usb_put_dev lines are
 * elided in this excerpt.)
 */
3423 static void lan78xx_disconnect(struct usb_interface *intf)
3425 struct lan78xx_net *dev;
3426 struct usb_device *udev;
3427 struct net_device *net;
3429 dev = usb_get_intfdata(intf);
3430 usb_set_intfdata(intf, NULL);
3434 udev = interface_to_usbdev(intf);
/* remove the board-specific PHY fixups registered at init time */
3437 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3438 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3440 phy_disconnect(net->phydev);
3442 unregister_netdev(net);
3444 cancel_delayed_work_sync(&dev->wq);
/* drop any TX URBs that were parked for resume */
3446 usb_scuttle_anchored_urbs(&dev->deferred);
3448 lan78xx_unbind(dev, intf);
3450 usb_kill_urb(dev->urb_intr);
3451 usb_free_urb(dev->urb_intr);
/* ndo_tx_timeout: the stack detected a stuck TX queue — unlink all
 * pending TX URBs and let the bottom half restart transmission.
 */
3457 static void lan78xx_tx_timeout(struct net_device *net)
3459 struct lan78xx_net *dev = netdev_priv(net);
3461 unlink_urbs(dev, &dev->txq);
3462 tasklet_schedule(&dev->bh);
/* ndo_features_check: disable GSO for skbs that would exceed the
 * device's single-transfer limit once the TX header is added, then
 * apply the generic VLAN/VXLAN feature restrictions.
 */
3465 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3466 struct net_device *netdev,
3467 netdev_features_t features)
3469 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3470 features &= ~NETIF_F_GSO_MASK;
3472 features = vlan_features_check(skb, features);
3473 features = vxlan_features_check(skb, features);
/* Netdev operations table wiring the driver's callbacks into the core
 * network stack. */
3478 static const struct net_device_ops lan78xx_netdev_ops = {
3479 .ndo_open = lan78xx_open,
3480 .ndo_stop = lan78xx_stop,
3481 .ndo_start_xmit = lan78xx_start_xmit,
3482 .ndo_tx_timeout = lan78xx_tx_timeout,
3483 .ndo_change_mtu = lan78xx_change_mtu,
3484 .ndo_set_mac_address = lan78xx_set_mac_addr,
3485 .ndo_validate_addr = eth_validate_addr,
3486 .ndo_do_ioctl = lan78xx_ioctl,
3487 .ndo_set_rx_mode = lan78xx_set_multicast,
3488 .ndo_set_features = lan78xx_set_features,
3489 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3490 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3491 .ndo_features_check = lan78xx_features_check,
/* Statistics timer callback: runs in timer (softirq) context, so it
 * only flags EVENT_STAT_UPDATE; the actual register reads happen in
 * the process-context worker.
 */
3494 static void lan78xx_stat_monitor(unsigned long param)
3496 struct lan78xx_net *dev;
3498 dev = (struct lan78xx_net *)param;
3500 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
/* USB probe: allocate and initialize the netdev/private state, validate
 * the three expected endpoints (bulk-in, bulk-out, interrupt-in), bind
 * the hardware, set up the interrupt URB, init the PHY and register the
 * netdev.  The tail lines are the error-unwind ladder.
 * NOTE(review): several error-check/`goto` lines are elided in this
 * excerpt.
 */
3503 static int lan78xx_probe(struct usb_interface *intf,
3504 const struct usb_device_id *id)
3506 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3507 struct lan78xx_net *dev;
3508 struct net_device *netdev;
3509 struct usb_device *udev;
3515 udev = interface_to_usbdev(intf);
/* take a reference on the usb_device for the netdev's lifetime */
3516 udev = usb_get_dev(udev);
3518 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3520 dev_err(&intf->dev, "Error: OOM\n");
3525 /* netdev_printk() needs this */
3526 SET_NETDEV_DEV(netdev, &intf->dev);
3528 dev = netdev_priv(netdev);
3532 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3533 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
/* initialize all skb queues and synchronization primitives */
3535 skb_queue_head_init(&dev->rxq);
3536 skb_queue_head_init(&dev->txq);
3537 skb_queue_head_init(&dev->done);
3538 skb_queue_head_init(&dev->rxq_pause);
3539 skb_queue_head_init(&dev->txq_pend);
3540 mutex_init(&dev->phy_mutex);
3542 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3543 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3544 init_usb_anchor(&dev->deferred);
3546 netdev->netdev_ops = &lan78xx_netdev_ops;
3547 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3548 netdev->ethtool_ops = &lan78xx_ethtool_ops;
/* legacy timer API: function/data set before init_timer() */
3550 dev->stat_monitor.function = lan78xx_stat_monitor;
3551 dev->stat_monitor.data = (unsigned long)dev;
3553 init_timer(&dev->stat_monitor);
3555 mutex_init(&dev->stats.access_lock);
/* sanity-check the interface exposes the three expected endpoints */
3557 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3562 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3563 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3564 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3569 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3570 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3571 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3576 ep_intr = &intf->cur_altsetting->endpoint[2];
3577 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3582 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3583 usb_endpoint_num(&ep_intr->desc));
3585 ret = lan78xx_bind(dev, intf);
3588 strcpy(netdev->name, "eth%d");
/* clamp MTU so frame + header never exceeds the device limit */
3590 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3591 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3593 /* MTU range: 68 - 9000 */
3594 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3595 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
/* set up the interrupt URB; URB_FREE_BUFFER frees buf with the urb */
3597 period = ep_intr->desc.bInterval;
3598 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3599 buf = kmalloc(maxp, GFP_KERNEL);
3601 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3602 if (!dev->urb_intr) {
3607 usb_fill_int_urb(dev->urb_intr, dev->udev,
3608 dev->pipe_intr, buf, maxp,
3609 intr_complete, dev, period);
3610 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3614 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3616 /* Reject broken descriptors. */
3617 if (dev->maxpacket == 0) {
3622 /* driver requires remote-wakeup capability during autosuspend. */
3623 intf->needs_remote_wakeup = 1;
3625 ret = lan78xx_phy_init(dev);
3629 ret = register_netdev(netdev);
3631 netif_err(dev, probe, netdev, "couldn't register the device\n");
3635 usb_set_intfdata(intf, dev);
3637 ret = device_set_wakeup_enable(&udev->dev, true);
3639 /* Default delay of 2sec has more overhead than advantage.
3640 * Set to 10sec as default.
3642 pm_runtime_set_autosuspend_delay(&udev->dev,
3643 DEFAULT_AUTOSUSPEND_DELAY);
/* ---- error unwind ---- */
3648 phy_disconnect(netdev->phydev);
3650 usb_free_urb(dev->urb_intr);
3652 lan78xx_unbind(dev, intf);
3654 free_netdev(netdev);
/* Compute a bitwise CRC-16 (polynomial 0x8005) over @buf for the
 * hardware wakeup-frame filter registers.  NOTE(review): most of the
 * shift/XOR loop body is elided in this excerpt — only the feedback
 * branch is visible.
 */
3661 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3663 const u16 crc16poly = 0x8005;
3669 for (i = 0; i < len; i++) {
3671 for (bit = 0; bit < 8; bit++) {
/* feedback: XOR in a '1' bit when msb differs from the data bit */
3675 if (msb ^ (u16)(data & 1)) {
3677 crc |= (u16)0x0001U;
/* Program the chip for system suspend according to the Wake-on-LAN
 * mask @wol: disable TX/RX, clear old wake status, configure WUCSR /
 * WUF_CFG wakeup-frame filters per wake type (PHY, magic, broadcast,
 * multicast, unicast, ARP), pick the suspend mode in PMT_CTL, and
 * finally re-enable RX so wake frames can be seen.
 * NOTE(review): return-value checks on the register accessors are not
 * visible in this excerpt.
 */
3686 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3694 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3695 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3696 const u8 arp_type[2] = { 0x08, 0x06 };
/* quiesce the MAC before reprogramming wake logic */
3698 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3699 buf &= ~MAC_TX_TXEN_;
3700 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3701 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3702 buf &= ~MAC_RX_RXEN_;
3703 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* clear previous wake configuration and latched wake sources */
3705 ret = lan78xx_write_reg(dev, WUCSR, 0);
3706 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3707 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3712 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3713 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3714 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
/* wipe all wakeup-frame filter slots before selective setup below */
3716 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3717 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3720 if (wol & WAKE_PHY) {
3721 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3723 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3724 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3725 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3727 if (wol & WAKE_MAGIC) {
3728 temp_wucsr |= WUCSR_MPEN_;
3730 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3731 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
/* magic-packet wake uses the deeper suspend mode 3 */
3732 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3734 if (wol & WAKE_BCAST) {
3735 temp_wucsr |= WUCSR_BCST_EN_;
3737 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3738 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3739 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3741 if (wol & WAKE_MCAST) {
3742 temp_wucsr |= WUCSR_WAKE_EN_;
3744 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3745 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3746 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3748 WUF_CFGX_TYPE_MCAST_ |
3749 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3750 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 7 = match first 3 bytes of the destination address */
3752 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3753 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3754 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3755 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3758 /* for IPv6 Multicast */
3759 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3760 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3762 WUF_CFGX_TYPE_MCAST_ |
3763 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3764 (crc & WUF_CFGX_CRC16_MASK_));
3766 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3767 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3768 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3769 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3772 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3773 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3774 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3776 if (wol & WAKE_UCAST) {
3777 temp_wucsr |= WUCSR_PFDA_EN_;
3779 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3780 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3781 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3783 if (wol & WAKE_ARP) {
3784 temp_wucsr |= WUCSR_WAKE_EN_;
3786 /* set WUF_CFG & WUF_MASK
3787 * for packettype (offset 12,13) = ARP (0x0806)
3789 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3790 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3792 WUF_CFGX_TYPE_ALL_ |
3793 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3794 (crc & WUF_CFGX_CRC16_MASK_));
/* mask 0x3000 = match ethertype bytes at offsets 12-13 */
3796 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3797 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3798 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3799 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3802 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3803 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3804 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3807 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3809 /* when multiple WOL bits are set */
3810 if (hweight_long((unsigned long)wol) > 1) {
3811 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3812 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3813 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3815 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear any latched wake-up status bits */
3818 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3819 buf |= PMT_CTL_WUPS_MASK_;
3820 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* RX must stay enabled for the chip to observe wake frames */
3822 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3823 buf |= MAC_RX_RXEN_;
3824 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* USB suspend callback.  On the first suspend: refuse autosuspend while
 * TX is pending, disable the MAC, detach the netdev and kill all URBs.
 * Then either arm good-frame/PHY wake (autosuspend) or program the
 * configured Wake-on-LAN set (system suspend) via lan78xx_set_suspend().
 * NOTE(review): error checks and the early-busy return are elided here.
 */
3829 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3831 struct lan78xx_net *dev = usb_get_intfdata(intf);
3832 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3837 event = message.event;
3839 if (!dev->suspend_count++) {
3840 spin_lock_irq(&dev->txq.lock);
3841 /* don't autosuspend while transmitting */
3842 if ((skb_queue_len(&dev->txq) ||
3843 skb_queue_len(&dev->txq_pend)) &&
3844 PMSG_IS_AUTO(message)) {
3845 spin_unlock_irq(&dev->txq.lock);
/* mark asleep so tx_bh anchors URBs instead of submitting */
3849 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3850 spin_unlock_irq(&dev->txq.lock);
/* stop the MAC transmitter and receiver */
3854 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3855 buf &= ~MAC_TX_TXEN_;
3856 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3857 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3858 buf &= ~MAC_RX_RXEN_;
3859 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3861 /* empty out the rx and queues */
3862 netif_device_detach(dev->net);
3863 lan78xx_terminate_urbs(dev);
3864 usb_kill_urb(dev->urb_intr);
/* reattach — presumably after the queues drained; confirm context */
3867 netif_device_attach(dev->net);
3870 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3871 del_timer(&dev->stat_monitor);
3873 if (PMSG_IS_AUTO(message)) {
3874 /* auto suspend (selective suspend) */
3875 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3876 buf &= ~MAC_TX_TXEN_;
3877 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3878 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3879 buf &= ~MAC_RX_RXEN_;
3880 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* reset wake state before arming good-frame wake */
3882 ret = lan78xx_write_reg(dev, WUCSR, 0);
3883 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3884 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3886 /* set goodframe wakeup */
3887 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3889 buf |= WUCSR_RFE_WAKE_EN_;
3890 buf |= WUCSR_STORE_WAKE_;
3892 ret = lan78xx_write_reg(dev, WUCSR, buf);
3894 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3896 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3897 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3899 buf |= PMT_CTL_PHY_WAKE_EN_;
3900 buf |= PMT_CTL_WOL_EN_;
3901 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3902 buf |= PMT_CTL_SUS_MODE_3_;
3904 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* clear latched wake-up status */
3906 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3908 buf |= PMT_CTL_WUPS_MASK_;
3910 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
/* keep RX on so the chip can see wake frames */
3912 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3913 buf |= MAC_RX_RXEN_;
3914 ret = lan78xx_write_reg(dev, MAC_RX, buf);
/* system suspend: program the user's Wake-on-LAN selection */
3916 lan78xx_set_suspend(dev, pdata->wol);
/* USB resume callback: restart the stats timer, resubmit the interrupt
 * URB, flush TX URBs that were anchored on dev->deferred during
 * suspend, clear the asleep flag, restart the queue/bh, then clear the
 * wake configuration and re-enable the MAC transmitter.
 * NOTE(review): error checks and the final return are elided here.
 */
3925 static int lan78xx_resume(struct usb_interface *intf)
3927 struct lan78xx_net *dev = usb_get_intfdata(intf);
3928 struct sk_buff *skb;
3933 if (!timer_pending(&dev->stat_monitor)) {
3935 mod_timer(&dev->stat_monitor,
3936 jiffies + STAT_UPDATE_TIMER);
3939 if (!--dev->suspend_count) {
3940 /* resume interrupt URBs */
3941 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3942 usb_submit_urb(dev->urb_intr, GFP_NOIO);
/* replay TX URBs deferred while the device was asleep */
3944 spin_lock_irq(&dev->txq.lock);
3945 while ((res = usb_get_from_anchor(&dev->deferred))) {
3946 skb = (struct sk_buff *)res->context;
3947 ret = usb_submit_urb(res, GFP_ATOMIC);
/* submit failed: free the skb and drop its autopm reference */
3949 dev_kfree_skb_any(skb);
3951 usb_autopm_put_interface_async(dev->intf);
3953 netif_trans_update(dev->net);
3954 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3958 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3959 spin_unlock_irq(&dev->txq.lock);
3961 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3962 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3963 netif_start_queue(dev->net);
3964 tasklet_schedule(&dev->bh);
/* disarm suspend-time wake sources and clear latched reasons */
3968 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3969 ret = lan78xx_write_reg(dev, WUCSR, 0);
3970 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3972 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3974 WUCSR2_IPV6_TCPSYN_RCD_ |
3975 WUCSR2_IPV4_TCPSYN_RCD_);
3977 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3978 WUCSR_EEE_RX_WAKE_ |
3980 WUCSR_RFE_WAKE_FR_ |
/* re-enable the MAC transmitter for normal operation */
3985 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3986 buf |= MAC_TX_TXEN_;
3987 ret = lan78xx_write_reg(dev, MAC_TX, buf);
/* Resume after a bus reset: the chip lost state, so restart the PHY
 * (a full re-init presumably happens in elided lines — confirm) and
 * fall through to the normal resume path.
 */
3992 static int lan78xx_reset_resume(struct usb_interface *intf)
3994 struct lan78xx_net *dev = usb_get_intfdata(intf);
3998 phy_start(dev->net->phydev);
4000 return lan78xx_resume(intf);
/* USB IDs this driver binds to: the Microchip LAN7800/LAN7850/LAN7801
 * gigabit Ethernet controllers. */
4003 static const struct usb_device_id products[] = {
4005 /* LAN7800 USB Gigabit Ethernet Device */
4006 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4009 /* LAN7850 USB Gigabit Ethernet Device */
4010 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4013 /* LAN7801 USB Gigabit Ethernet Device */
4014 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4018 MODULE_DEVICE_TABLE(usb, products);
/* USB driver registration: supports autosuspend (with remote wakeup,
 * see probe) and disables hub-initiated LPM. */
4020 static struct usb_driver lan78xx_driver = {
4021 .name = DRIVER_NAME,
4022 .id_table = products,
4023 .probe = lan78xx_probe,
4024 .disconnect = lan78xx_disconnect,
4025 .suspend = lan78xx_suspend,
4026 .resume = lan78xx_resume,
4027 .reset_resume = lan78xx_reset_resume,
4028 .supports_autosuspend = 1,
4029 .disable_hub_initiated_lpm = 1,
4032 module_usb_driver(lan78xx_driver);
4034 MODULE_AUTHOR(DRIVER_AUTHOR);
4035 MODULE_DESCRIPTION(DRIVER_DESC);
4036 MODULE_LICENSE("GPL");