1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
5 * e100.c: Intel(R) PRO/100 ethernet driver
7 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
8 * original e100 driver, but better described as a munging of
9 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
12 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
13 * Open Source Software Developers Manual,
14 * http://sourceforge.net/projects/e1000
21 * The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
22 * controller family, which includes the 82557, 82558, 82559, 82550,
23 * 82551, and 82562 devices. 82558 and greater controllers
24 * integrate the Intel 82555 PHY. The controllers are used in
25 * server and client network interface cards, as well as in
26 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
27 * configurations. 8255x supports a 32-bit linear addressing
28 * mode and operates at a 33 MHz PCI clock rate.
30 * II. Driver Operation
32 * Memory-mapped mode is used exclusively to access the device's
33 * shared-memory structure, the Control/Status Registers (CSR). All
34 * setup, configuration, and control of the device, including queuing
35 * of Tx, Rx, and configuration commands is through the CSR.
36 * cmd_lock serializes accesses to the CSR command register. cb_lock
37 * protects the shared Command Block List (CBL).
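 *
 * As a condensed illustration (the real sequence, including the
 * accept-timeout and the cuc_resume special case, lives in
 * e100_exec_cmd() below), issuing an SCB command looks roughly like:
 *
 *	spin_lock_irqsave(&nic->cmd_lock, flags);
 *	while (ioread8(&nic->csr->scb.cmd_lo))
 *		;			// previous command not yet accepted
 *	iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
 *	iowrite8(cmd, &nic->csr->scb.cmd_lo);
 *	spin_unlock_irqrestore(&nic->cmd_lock, flags);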
39 * 8255x is highly MII-compliant and all accesses to the PHY go
40 * through the Management Data Interface (MDI). Consequently, the
41 * driver leverages the mii.c library shared with other MII-compliant
44 * Big- and Little-Endian byte order as well as 32- and 64-bit
45 * archs are supported. Weak-ordered memory and non-cache-coherent
46 * archs are supported.
50 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
51 * together in a fixed-size ring (CBL) thus forming the flexible mode
52 * memory structure. A TCB marked with the suspend-bit indicates
53 * the end of the ring. The last TCB processed suspends the
54 * controller, and the controller can be restarted by issuing a CU
55 * resume command to continue from the suspend point, or a CU start
56 * command to start at a given position in the ring.
58 * Non-Tx commands (config, multicast setup, etc) are linked
59 * into the CBL ring along with Tx commands. The common structure
60 * used for both Tx and non-Tx commands is the Command Block (CB).
62 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
63 * is the next CB to check for completion; cb_to_send is the first
64 * CB to start on in case of a previous failure to resume. CB clean
65 * up happens in interrupt context in response to a CU interrupt.
66 * cbs_avail keeps track of number of free CB resources available.
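 *
 * As a simplified sketch (locking and error handling omitted; see
 * e100_exec_cb() below), queuing one command and kicking the CU is:
 *
 *	cb = nic->cb_to_use;
 *	nic->cb_to_use = cb->next;
 *	nic->cbs_avail--;
 *	cb->command |= cpu_to_le16(cb_s);	   // suspend on this CB first
 *	cb->prev->command &= cpu_to_le16(~cb_s);   // then let h/w run past prev
 *	e100_exec_cmd(nic, nic->cuc_cmd, cb->dma_addr);   // CU start or resume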
68 * Hardware padding of short packets to minimum packet size is
69 * enabled. 82557 pads with 7Eh, while the later controllers pad
74 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
75 * Descriptors (RFD) + data buffer, thus forming the simplified mode
76 * memory structure. Rx skbs are allocated to contain both the RFD
77 * and the data buffer, but the RFD is pulled off before the skb is
78 * indicated. The data buffer is aligned such that encapsulated
79 * protocol headers are u32-aligned. Since the RFD is part of the
80 * mapped shared memory, and completion status is contained within
81 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
82 * view from software and hardware.
84 * In order to keep updates to the RFD link field from colliding with
85 * hardware writes to mark packets complete, we use the feature that
86 * hardware will not write to a size 0 descriptor and mark the previous
87 * packet as end-of-list (EL). After updating the link, we remove EL
88 * and only then restore the size such that hardware may use the
89 * previous-to-end RFD.
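 *
 * Condensed from e100_rx_clean() below, where each RFD update is pushed
 * out with pci_dma_sync_single_for_device() so the hardware sees them
 * in exactly this order:
 *
 *	new_before_last_rfd->size = 0;
 *	new_before_last_rfd->command |= cpu_to_le16(cb_el);
 *	old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
 *	old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);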
91 * Under typical operation, the receive unit (RU) is started once,
92 * and the controller happily fills RFDs as frames arrive. If
93 * replacement RFDs cannot be allocated, or the RU goes non-active,
94 * the RU must be restarted. Frame arrival generates an interrupt,
95 * and Rx indication and re-allocation happen in the same context,
96 * therefore no locking is required. A software-generated interrupt
97 * is generated from the watchdog to recover from a failed allocation
98 * scenario where all Rx resources have been indicated and none re-
103 * VLAN offloading of tagging, stripping and filtering is not
104 * supported, but the driver will accommodate the extra 4-byte VLAN tag
105 * for processing by upper layers. Tx/Rx Checksum offloading is not
106 * supported. Tx Scatter/Gather is not supported. Jumbo Frames are
107 * not supported (hardware limitation).
109 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
111 * Thanks to JC (jchapman@katalix.com) for helping with
112 * testing/troubleshooting the development driver.
115 * o several entry points race with dev->close
116 * o check for tx-no-resources/stop Q races with tx clean/wake Q
119 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
120 * - Stratus87247: protect MDI control register manipulations
121 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
122 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
125 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
127 #include <linux/hardirq.h>
128 #include <linux/interrupt.h>
129 #include <linux/module.h>
130 #include <linux/moduleparam.h>
131 #include <linux/kernel.h>
132 #include <linux/types.h>
133 #include <linux/sched.h>
134 #include <linux/slab.h>
135 #include <linux/delay.h>
136 #include <linux/init.h>
137 #include <linux/pci.h>
138 #include <linux/dma-mapping.h>
139 #include <linux/dmapool.h>
140 #include <linux/netdevice.h>
141 #include <linux/etherdevice.h>
142 #include <linux/mii.h>
143 #include <linux/if_vlan.h>
144 #include <linux/skbuff.h>
145 #include <linux/ethtool.h>
146 #include <linux/string.h>
147 #include <linux/firmware.h>
148 #include <linux/rtnetlink.h>
149 #include <asm/unaligned.h>
152 #define DRV_NAME "e100"
153 #define DRV_EXT "-NAPI"
154 #define DRV_VERSION "3.5.24-k2" DRV_EXT
155 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
156 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
158 #define E100_WATCHDOG_PERIOD (2 * HZ)
159 #define E100_NAPI_WEIGHT 16
161 #define FIRMWARE_D101M "/*(DEBLOBBED)*/"
162 #define FIRMWARE_D101S "/*(DEBLOBBED)*/"
163 #define FIRMWARE_D102E "/*(DEBLOBBED)*/"
165 MODULE_DESCRIPTION(DRV_DESCRIPTION);
166 MODULE_AUTHOR(DRV_COPYRIGHT);
167 MODULE_LICENSE("GPL");
168 MODULE_VERSION(DRV_VERSION);
171 static int debug = 3;
172 static int eeprom_bad_csum_allow = 0;
173 static int use_io = 0;
174 module_param(debug, int, 0);
175 module_param(eeprom_bad_csum_allow, int, 0);
176 module_param(use_io, int, 0);
177 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
178 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
179 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
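
/* Illustrative module load (not required; the defaults above apply
 * otherwise): maximize debug output, tolerate a corrupt EEPROM checksum
 * and force I/O-mapped register access:
 *
 *	modprobe e100 debug=16 eeprom_bad_csum_allow=1 use_io=1
 */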
181 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
182 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
183 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
184 static const struct pci_device_id e100_id_table[] = {
185 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
186 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
187 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
188 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
189 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
190 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
191 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
192 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
193 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
194 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
195 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
196 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
197 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
198 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
199 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
200 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
201 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
202 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
203 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
204 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
205 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
206 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
207 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
208 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
209 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
210 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
211 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
212 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
213 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
214 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
215 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
216 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
217 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
218 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
219 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
220 INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
221 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
222 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
223 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
224 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
225 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
226 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
229 MODULE_DEVICE_TABLE(pci, e100_id_table);
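
/* For reference, an entry such as INTEL_8255X_ETHERNET_DEVICE(0x1229, 0)
 * above expands to roughly this pci_device_id initializer, with the ICH
 * revision carried in driver_data:
 *
 *	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID,
 *	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, 0 }
 */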
232 mac_82557_D100_A = 0,
233 mac_82557_D100_B = 1,
234 mac_82557_D100_C = 2,
235 mac_82558_D101_A4 = 4,
236 mac_82558_D101_B0 = 5,
240 mac_82550_D102_C = 13,
248 phy_100a = 0x000003E0,
249 phy_100c = 0x035002A8,
250 phy_82555_tx = 0x015002A8,
251 phy_nsc_tx = 0x5C002000,
252 phy_82562_et = 0x033002A8,
253 phy_82562_em = 0x032002A8,
254 phy_82562_ek = 0x031002A8,
255 phy_82562_eh = 0x017002A8,
256 phy_82552_v = 0xd061004d,
257 phy_unknown = 0xFFFFFFFF,
260 /* CSR (Control/Status Registers) */
286 RU_UNINITIALIZED = -1,
290 stat_ack_not_ours = 0x00,
291 stat_ack_sw_gen = 0x04,
293 stat_ack_cu_idle = 0x20,
294 stat_ack_frame_rx = 0x40,
295 stat_ack_cu_cmd_done = 0x80,
296 stat_ack_not_present = 0xFF,
297 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
298 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
302 irq_mask_none = 0x00,
310 ruc_load_base = 0x06,
313 cuc_dump_addr = 0x40,
314 cuc_dump_stats = 0x50,
315 cuc_load_base = 0x60,
316 cuc_dump_reset = 0x70,
320 cuc_dump_complete = 0x0000A005,
321 cuc_dump_reset_complete = 0x0000A007,
325 software_reset = 0x0000,
327 selective_reset = 0x0002,
330 enum eeprom_ctrl_lo {
338 mdi_write = 0x04000000,
339 mdi_read = 0x08000000,
340 mdi_ready = 0x10000000,
350 enum eeprom_offsets {
351 eeprom_cnfg_mdix = 0x03,
352 eeprom_phy_iface = 0x06,
354 eeprom_config_asf = 0x0D,
355 eeprom_smbus_addr = 0x90,
358 enum eeprom_cnfg_mdix {
359 eeprom_mdix_enabled = 0x0080,
362 enum eeprom_phy_iface {
375 eeprom_id_wol = 0x0020,
378 enum eeprom_config_asf {
384 cb_complete = 0x8000,
389 * cb_command - Command Block flags
390 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
418 struct rx *next, *prev;
423 #if defined(__BIG_ENDIAN_BITFIELD)
429 /*0*/ u8 X(byte_count:6, pad0:2);
430 /*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
431 /*2*/ u8 adaptive_ifs;
432 /*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
433 term_write_cache_line:1), pad3:4);
434 /*4*/ u8 X(rx_dma_max_count:7, pad4:1);
435 /*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
436 /*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
437 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
438 rx_save_overruns : 1), rx_save_bad_frames : 1);
439 /*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
440 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
442 /*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
443 /*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
444 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
445 /*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
447 /*11*/ u8 X(linear_priority:3, pad11:5);
448 /*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
449 /*13*/ u8 ip_addr_lo;
450 /*14*/ u8 ip_addr_hi;
451 /*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
452 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
453 pad15_2:1), crs_or_cdt:1);
454 /*16*/ u8 fc_delay_lo;
455 /*17*/ u8 fc_delay_hi;
456 /*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
457 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
458 /*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
459 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
460 full_duplex_force:1), full_duplex_pin:1);
461 /*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
462 /*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
463 /*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
467 #define E100_MAX_MULTICAST_ADDRS 64
470 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
473 /* Important: keep total struct u32-aligned */
474 #define UCODE_SIZE 134
481 __le32 ucode[UCODE_SIZE];
482 struct config config;
495 __le32 dump_buffer_addr;
497 struct cb *next, *prev;
503 lb_none = 0, lb_mac = 1, lb_phy = 3,
507 __le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
508 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
509 tx_multiple_collisions, tx_total_collisions;
510 __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
511 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
512 rx_short_frame_errors;
513 __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
514 __le16 xmt_tco_frames, rcv_tco_frames;
534 struct param_range rfds;
535 struct param_range cbs;
539 /* Begin: frequently used values: keep adjacent for cache effect */
540 u32 msg_enable ____cacheline_aligned;
541 struct net_device *netdev;
542 struct pci_dev *pdev;
543 u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
545 struct rx *rxs ____cacheline_aligned;
546 struct rx *rx_to_use;
547 struct rx *rx_to_clean;
548 struct rfd blank_rfd;
549 enum ru_state ru_running;
551 spinlock_t cb_lock ____cacheline_aligned;
553 struct csr __iomem *csr;
554 enum scb_cmd_lo cuc_cmd;
555 unsigned int cbs_avail;
556 struct napi_struct napi;
558 struct cb *cb_to_use;
559 struct cb *cb_to_send;
560 struct cb *cb_to_clean;
562 /* End: frequently used values: keep adjacent for cache effect */
566 promiscuous = (1 << 1),
567 multicast_all = (1 << 2),
568 wol_magic = (1 << 3),
569 ich_10h_workaround = (1 << 4),
570 } flags ____cacheline_aligned;
574 struct params params;
575 struct timer_list watchdog;
576 struct mii_if_info mii;
577 struct work_struct tx_timeout_task;
578 enum loopback loopback;
583 struct dma_pool *cbs_pool;
584 dma_addr_t cbs_dma_addr;
590 u32 tx_single_collisions;
591 u32 tx_multiple_collisions;
596 u32 rx_fc_unsupported;
598 u32 rx_short_frame_errors;
599 u32 rx_over_length_errors;
603 spinlock_t mdio_lock;
604 const struct firmware *fw;
607 static inline void e100_write_flush(struct nic *nic)
609 /* Flush previous PCI writes through intermediate bridges
610 * by doing a benign read */
611 (void)ioread8(&nic->csr->scb.status);
614 static void e100_enable_irq(struct nic *nic)
618 spin_lock_irqsave(&nic->cmd_lock, flags);
619 iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
620 e100_write_flush(nic);
621 spin_unlock_irqrestore(&nic->cmd_lock, flags);
624 static void e100_disable_irq(struct nic *nic)
628 spin_lock_irqsave(&nic->cmd_lock, flags);
629 iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
630 e100_write_flush(nic);
631 spin_unlock_irqrestore(&nic->cmd_lock, flags);
634 static void e100_hw_reset(struct nic *nic)
636 /* Put CU and RU into idle with a selective reset to get
637 * device off of PCI bus */
638 iowrite32(selective_reset, &nic->csr->port);
639 e100_write_flush(nic); udelay(20);
641 /* Now fully reset device */
642 iowrite32(software_reset, &nic->csr->port);
643 e100_write_flush(nic); udelay(20);
645 /* Mask off our interrupt line - it's unmasked after reset */
646 e100_disable_irq(nic);
649 static int e100_self_test(struct nic *nic)
651 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
653 /* Passing the self-test is a pretty good indication
654 * that the device can DMA to/from host memory */
656 nic->mem->selftest.signature = 0;
657 nic->mem->selftest.result = 0xFFFFFFFF;
659 iowrite32(selftest | dma_addr, &nic->csr->port);
660 e100_write_flush(nic);
661 /* Wait 10 msec for self-test to complete */
664 /* Interrupts are enabled after self-test */
665 e100_disable_irq(nic);
667 /* Check results of self-test */
668 if (nic->mem->selftest.result != 0) {
669 netif_err(nic, hw, nic->netdev,
670 "Self-test failed: result=0x%08X\n",
671 nic->mem->selftest.result);
674 if (nic->mem->selftest.signature == 0) {
675 netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
682 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
684 u32 cmd_addr_data[3];
688 /* Three cmds: write/erase enable, write data, write/erase disable */
689 cmd_addr_data[0] = op_ewen << (addr_len - 2);
690 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
692 cmd_addr_data[2] = op_ewds << (addr_len - 2);
694 /* Bit-bang cmds to write word to eeprom */
695 for (j = 0; j < 3; j++) {
698 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
699 e100_write_flush(nic); udelay(4);
701 for (i = 31; i >= 0; i--) {
702 ctrl = (cmd_addr_data[j] & (1 << i)) ?
704 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
705 e100_write_flush(nic); udelay(4);
707 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
708 e100_write_flush(nic); udelay(4);
710 /* Wait 10 msec for cmd to complete */
714 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
715 e100_write_flush(nic); udelay(4);
719 /* General technique stolen from the eepro100 driver - very clever */
720 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
727 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
730 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
731 e100_write_flush(nic); udelay(4);
733 /* Bit-bang to read word from eeprom */
734 for (i = 31; i >= 0; i--) {
735 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
736 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
737 e100_write_flush(nic); udelay(4);
739 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
740 e100_write_flush(nic); udelay(4);
742 /* Eeprom drives a dummy zero to EEDO after receiving
743 * complete address. Use this to adjust addr_len. */
744 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
745 if (!(ctrl & eedo) && i > 16) {
746 *addr_len -= (i - 16);
750 data = (data << 1) | (ctrl & eedo ? 1 : 0);
754 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
755 e100_write_flush(nic); udelay(4);
757 return cpu_to_le16(data);
760 /* Load entire EEPROM image into driver cache and validate checksum */
761 static int e100_eeprom_load(struct nic *nic)
763 u16 addr, addr_len = 8, checksum = 0;
765 /* Try reading with an 8-bit addr len to discover actual addr len */
766 e100_eeprom_read(nic, &addr_len, 0);
767 nic->eeprom_wc = 1 << addr_len;
769 for (addr = 0; addr < nic->eeprom_wc; addr++) {
770 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
771 if (addr < nic->eeprom_wc - 1)
772 checksum += le16_to_cpu(nic->eeprom[addr]);
775 /* The checksum, stored in the last word, is calculated such that
776 * the sum of words should be 0xBABA */
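	/* Worked example with illustrative numbers: if the first wc-1 words
	 * sum to 0x1234, the last word holds 0xBABA - 0x1234 = 0xA886, so
	 * the 16-bit sum over all words comes back to 0xBABA. */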
777 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
778 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
779 if (!eeprom_bad_csum_allow)
786 /* Save (portion of) driver EEPROM cache to device and update checksum */
787 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
789 u16 addr, addr_len = 8, checksum = 0;
791 /* Try reading with an 8-bit addr len to discover actual addr len */
792 e100_eeprom_read(nic, &addr_len, 0);
793 nic->eeprom_wc = 1 << addr_len;
795 if (start + count >= nic->eeprom_wc)
798 for (addr = start; addr < start + count; addr++)
799 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
801 /* The checksum, stored in the last word, is calculated such that
802 * the sum of words should be 0xBABA */
803 for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
804 checksum += le16_to_cpu(nic->eeprom[addr]);
805 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
806 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
807 nic->eeprom[nic->eeprom_wc - 1]);
812 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
813 #define E100_WAIT_SCB_FAST 20 /* delay like the old code */
814 static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
820 spin_lock_irqsave(&nic->cmd_lock, flags);
822 /* Previous command is accepted when SCB clears */
823 for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
824 if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
827 if (unlikely(i > E100_WAIT_SCB_FAST))
830 if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
835 if (unlikely(cmd != cuc_resume))
836 iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
837 iowrite8(cmd, &nic->csr->scb.cmd_lo);
840 spin_unlock_irqrestore(&nic->cmd_lock, flags);
845 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
846 int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
852 spin_lock_irqsave(&nic->cb_lock, flags);
854 if (unlikely(!nic->cbs_avail)) {
860 nic->cb_to_use = cb->next;
864 err = cb_prepare(nic, cb, skb);
868 if (unlikely(!nic->cbs_avail))
872 /* Order is important otherwise we'll be in a race with h/w:
873 * set S-bit in current first, then clear S-bit in previous. */
874 cb->command |= cpu_to_le16(cb_s);
876 cb->prev->command &= cpu_to_le16(~cb_s);
878 while (nic->cb_to_send != nic->cb_to_use) {
879 if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
880 nic->cb_to_send->dma_addr))) {
881 /* Ok, here's where things get sticky. It's
882 * possible that we can't schedule the command
883 * because the controller is too busy, so
884 * let's just queue the command and try again
885 * when another command is scheduled. */
886 if (err == -ENOSPC) {
888 schedule_work(&nic->tx_timeout_task);
892 nic->cuc_cmd = cuc_resume;
893 nic->cb_to_send = nic->cb_to_send->next;
898 spin_unlock_irqrestore(&nic->cb_lock, flags);
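
/* Typical callers: Tx uses e100_exec_cb(nic, skb, e100_xmit_prepare), while
 * non-Tx commands pass a NULL skb, e.g. e100_exec_cb(nic, NULL,
 * e100_configure) or e100_exec_cb(nic, NULL, e100_multi); the cb_prepare
 * callback fills in the CB before it is handed to the CU.
 */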
903 static int mdio_read(struct net_device *netdev, int addr, int reg)
905 struct nic *nic = netdev_priv(netdev);
906 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
909 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
911 struct nic *nic = netdev_priv(netdev);
913 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
916 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
917 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
925 * Stratus87247: we shouldn't be writing the MDI control
926 * register until the Ready bit shows True. Also, since
927 * manipulation of the MDI control registers is a multi-step
928 * procedure it should be done under lock.
930 spin_lock_irqsave(&nic->mdio_lock, flags);
931 for (i = 100; i; --i) {
932 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
937 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
938 spin_unlock_irqrestore(&nic->mdio_lock, flags);
939 return 0; /* No way to indicate timeout error */
941 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
943 for (i = 0; i < 100; i++) {
945 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
948 spin_unlock_irqrestore(&nic->mdio_lock, flags);
949 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
950 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
951 dir == mdi_read ? "READ" : "WRITE",
952 addr, reg, data, data_out);
953 return (u16)data_out;
956 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
957 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
963 if ((reg == MII_BMCR) && (dir == mdi_write)) {
964 if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
965 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
969 * Workaround Si issue where sometimes the part will not
970 * autoneg to 100Mbps even when advertised.
972 if (advert & ADVERTISE_100FULL)
973 data |= BMCR_SPEED100 | BMCR_FULLDPLX;
974 else if (advert & ADVERTISE_100HALF)
975 data |= BMCR_SPEED100;
978 return mdio_ctrl_hw(nic, addr, dir, reg, data);
981 /* Fully software-emulated mdio_ctrl() function for cards without
982 * MII-compliant PHYs.
983 * For now, this is mainly geared towards 80c24 support; in case of further
984 * requirements for other types (i82503, ...?) either extend this mechanism
985 * or split it, whichever is cleaner.
987 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
993 /* might need to allocate a netdev_priv'ed register array eventually
994 * to be able to record state changes, but for now
995 * some fully hardcoded register handling ought to be ok I guess. */
997 if (dir == mdi_read) {
1000 /* Auto-negotiation, right? */
1001 return BMCR_ANENABLE |
1004 return BMSR_LSTATUS /* for mii_link_ok() */ |
1008 /* 80c24 is a "combo card" PHY, right? */
1009 return ADVERTISE_10HALF |
1012 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1013 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1014 dir == mdi_read ? "READ" : "WRITE",
1021 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1022 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1023 dir == mdi_read ? "READ" : "WRITE",
1029 static inline int e100_phy_supports_mii(struct nic *nic)
1031 /* for now, just check it by comparing whether we
1032 are using MII software emulation.
1034 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1037 static void e100_get_defaults(struct nic *nic)
1039 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
1040 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
1042 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
1043 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
1044 if (nic->mac == mac_unknown)
1045 nic->mac = mac_82557_D100_A;
1047 nic->params.rfds = rfds;
1048 nic->params.cbs = cbs;
1050 /* Quadwords to DMA into FIFO before starting frame transmit */
1051 nic->tx_threshold = 0xE0;
1053 /* no interrupt for every tx completion, delay = 256us if not 557 */
1054 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
1055 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1057 /* Template for a freshly allocated RFD */
1058 nic->blank_rfd.command = 0;
1059 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
1060 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
1063 nic->mii.phy_id_mask = 0x1F;
1064 nic->mii.reg_num_mask = 0x1F;
1065 nic->mii.dev = nic->netdev;
1066 nic->mii.mdio_read = mdio_read;
1067 nic->mii.mdio_write = mdio_write;
1070 static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1072 struct config *config = &cb->u.config;
1073 u8 *c = (u8 *)config;
1074 struct net_device *netdev = nic->netdev;
1076 cb->command = cpu_to_le16(cb_config);
1078 memset(config, 0, sizeof(struct config));
1080 config->byte_count = 0x16; /* bytes in this struct */
1081 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
1082 config->direct_rx_dma = 0x1; /* reserved */
1083 config->standard_tcb = 0x1; /* 1=standard, 0=extended */
1084 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
1085 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
1086 config->tx_underrun_retry = 0x3; /* # of underrun retries */
1087 if (e100_phy_supports_mii(nic))
1088 config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
1089 config->pad10 = 0x6;
1090 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
1091 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
1092 config->ifs = 0x6; /* x16 = inter frame spacing */
1093 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
1094 config->pad15_1 = 0x1;
1095 config->pad15_2 = 0x1;
1096 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
1097 config->fc_delay_hi = 0x40; /* time delay for fc frame */
1098 config->tx_padding = 0x1; /* 1=pad short frames */
1099 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
1100 config->pad18 = 0x1;
1101 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
1102 config->pad20_1 = 0x1F;
1103 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
1104 config->pad21_1 = 0x5;
1106 config->adaptive_ifs = nic->adaptive_ifs;
1107 config->loopback = nic->loopback;
1109 if (nic->mii.force_media && nic->mii.full_duplex)
1110 config->full_duplex_force = 0x1; /* 1=force, 0=auto */
1112 if (nic->flags & promiscuous || nic->loopback) {
1113 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1114 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1115 config->promiscuous_mode = 0x1; /* 1=on, 0=off */
1118 if (unlikely(netdev->features & NETIF_F_RXFCS))
1119 config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */
1121 if (nic->flags & multicast_all)
1122 config->multicast_all = 0x1; /* 1=accept, 0=no */
1124 /* disable WoL when up */
1125 if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
1126 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
1128 if (nic->mac >= mac_82558_D101_A4) {
1129 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
1130 config->mwi_enable = 0x1; /* 1=enable, 0=disable */
1131 config->standard_tcb = 0x0; /* 1=standard, 0=extended */
1132 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
1133 if (nic->mac >= mac_82559_D101M) {
1134 config->tno_intr = 0x1; /* TCO stats enable */
1135 /* Enable TCO in extended config */
1136 if (nic->mac >= mac_82551_10) {
1137 config->byte_count = 0x20; /* extended bytes */
1138 config->rx_d102_mode = 0x1; /* GMRC for TCO */
1141 config->standard_stat_counter = 0x0;
1145 if (netdev->features & NETIF_F_RXALL) {
1146 config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
1147 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1148 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1151 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
1153 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
1155 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
1160 /*************************************************************************
1161 * CPUSaver parameters
1163 * All CPUSaver parameters are 16-bit literals that are part of a
1164 * "move immediate value" instruction. By changing the value of
1165 * the literal in the instruction before the code is loaded, the
1166 * driver can change the algorithm.
1168 * INTDELAY - This loads the dead-man timer with its initial value.
1169 * When this timer expires the interrupt is asserted, and the
1170 * timer is reset each time a new packet is received. (see
1171 * BUNDLEMAX below to set the limit on number of chained packets)
1172 * The current default is 0x600 or 1536. Experiments show that
1173 * the value should probably stay within the 0x200 - 0x1000 range.
1176 * This sets the maximum number of frames that will be bundled. In
1177 * some situations, such as the TCP windowing algorithm, it may be
1178 * better to limit the growth of the bundle size than let it go as
1179 * high as it can, because that could cause too much added latency.
1180 * The default is six, because this is the number of packets in the
1181 * default TCP window size. A value of 1 would make CPUSaver indicate
1182 * an interrupt for every frame received. If you do not want to put
1183 * a limit on the bundle size, set this value to xFFFF.
1186 * This contains a bit-mask describing the minimum size frame that
1187 * will be bundled. The default masks the lower 7 bits, which means
1188 * that any frame less than 128 bytes in length will not be bundled,
1189 * but will instead immediately generate an interrupt. This does
1190 * not affect the current bundle in any way. Any frame that is 128
1191 * bytes or larger will be bundled normally. This feature is meant
1192 * to provide immediate indication of ACK frames in a TCP environment.
1193 * Customers were seeing poor performance when a machine with CPUSaver
1194 * enabled was sending but not receiving. The delay introduced when
1195 * the ACKs were received was enough to reduce total throughput, because
1196 * the sender would sit idle until the ACK was finally seen.
1198 * The current default is 0xFF80, which masks out the lower 7 bits.
1199 * This means that any frame which is x7F (127) bytes or smaller
1200 * will cause an immediate interrupt. Because this value must be a
1201 * bit mask, there are only a few valid values that can be used. To
1202 * turn this feature off, the driver can write the value xFFFF to the
1203 * lower word of this instruction (in the same way that the other
1204 * parameters are used). Likewise, a value of 0xF800 (2047) would
1205 * cause an interrupt to be generated for every frame, because all
1206 * standard Ethernet frames are <= 2047 bytes in length.
1207 *************************************************************************/
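
/* Worked example of the min-size mask described above: with a mask of
 * 0xFF80, a 64-byte ACK satisfies (64 & 0xFF80) == 0 and is indicated
 * immediately, while a 1500-byte frame (1500 & 0xFF80 == 0x580) is
 * bundled until BUNDLEMAX frames are chained or the INTDELAY dead-man
 * timer expires.  Note that BUNDLESMALL=1 below selects the 0xFFFF
 * ("feature off") mask instead, so small frames are bundled like any
 * other.
 */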
1209 /* if you wish to disable the ucode functionality, while maintaining the
1210 * workarounds it provides, set the following defines to:
1215 #define BUNDLESMALL 1
1216 #define BUNDLEMAX (u16)6
1217 #define INTDELAY (u16)1536 /* 0x600 */
1219 /* Initialize firmware */
1220 static const struct firmware *e100_request_firmware(struct nic *nic)
1222 const char *fw_name;
1223 const struct firmware *fw = nic->fw;
1224 u8 timer, bundle, min_size;
1226 bool required = false;
1228 /* do not load u-code for ICH devices */
1229 if (nic->flags & ich)
1232 /* Search for ucode match against h/w revision
1234 * Based on comments in the source code for the FreeBSD fxp
1235 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
1237 * "fixes for bugs in the B-step hardware (specifically, bugs
1238 * with Inline Receive)."
1240 * So we must fail if it cannot be loaded.
1242 * The other microcode files are only required for the optional
1243 * CPUSaver feature. Nice to have, but no reason to fail.
1245 if (nic->mac == mac_82559_D101M) {
1246 fw_name = FIRMWARE_D101M;
1247 } else if (nic->mac == mac_82559_D101S) {
1248 fw_name = FIRMWARE_D101S;
1249 } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
1250 fw_name = FIRMWARE_D102E;
1252 } else { /* No ucode on other devices */
1256 /* If the firmware has not previously been loaded, request a pointer
1257 * to it. If it was previously loaded, we are reinitializing the
1258 * adapter, possibly in a resume from hibernate, in which case
1259 * reject_firmware() cannot be used.
1262 err = reject_firmware(&fw, fw_name, &nic->pdev->dev);
1266 netif_err(nic, probe, nic->netdev,
1267 "Failed to load firmware \"%s\": %d\n",
1269 netif_err(nic, probe, nic->netdev, "Proceeding without firmware\n");
1272 netif_info(nic, probe, nic->netdev,
1273 "CPUSaver disabled. Needs \"%s\": %d\n",
1279 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1280 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1281 if (fw->size != UCODE_SIZE * 4 + 3) {
1282 netif_err(nic, probe, nic->netdev,
1283 "Firmware \"%s\" has wrong size %zu\n",
1285 release_firmware(fw);
1286 return ERR_PTR(-EINVAL);
1289 /* Read timer, bundle and min_size from end of firmware blob */
1290 timer = fw->data[UCODE_SIZE * 4];
1291 bundle = fw->data[UCODE_SIZE * 4 + 1];
1292 min_size = fw->data[UCODE_SIZE * 4 + 2];
1294 if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
1295 min_size >= UCODE_SIZE) {
1296 netif_err(nic, probe, nic->netdev,
1297 "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
1298 fw_name, timer, bundle, min_size);
1299 release_firmware(fw);
1300 return ERR_PTR(-EINVAL);
1303 /* OK, firmware is validated and ready to use. Save a pointer
1304 * to it in the nic */
1309 static int e100_setup_ucode(struct nic *nic, struct cb *cb,
1310 struct sk_buff *skb)
1312 const struct firmware *fw = (void *)skb;
1313 u8 timer, bundle, min_size;
1315 /* It's not a real skb; we just abused the fact that e100_exec_cb
1316 will pass it through to here... */
1319 /* firmware is stored as little endian already */
1320 memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
1322 /* Read timer, bundle and min_size from end of firmware blob */
1323 timer = fw->data[UCODE_SIZE * 4];
1324 bundle = fw->data[UCODE_SIZE * 4 + 1];
1325 min_size = fw->data[UCODE_SIZE * 4 + 2];
1327 /* Insert user-tunable settings in cb->u.ucode */
1328 cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
1329 cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
1330 cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
1331 cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
1332 cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
1333 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
1335 cb->command = cpu_to_le16(cb_ucode | cb_el);
1339 static inline int e100_load_ucode_wait(struct nic *nic)
1341 const struct firmware *fw;
1342 int err = 0, counter = 50;
1343 struct cb *cb = nic->cb_to_clean;
1345 fw = e100_request_firmware(nic);
1346 /* If it's NULL, then no ucode is required */
1347 if (IS_ERR_OR_NULL(fw))
1348 return PTR_ERR_OR_ZERO(fw);
1350 if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
1351 netif_err(nic, probe, nic->netdev,
1352 "ucode cmd failed with error %d\n", err);
1354 /* must restart cuc */
1355 nic->cuc_cmd = cuc_start;
1357 /* wait for completion */
1358 e100_write_flush(nic);
1361 /* wait for possibly (ouch) 500ms */
1362 while (!(cb->status & cpu_to_le16(cb_complete))) {
1364 if (!--counter) break;
1367 /* ack any interrupts, something could have been set */
1368 iowrite8(~0, &nic->csr->scb.stat_ack);
1370 /* if the command failed, or is not OK, notify and return */
1371 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1372 netif_err(nic, probe, nic->netdev, "ucode load failed\n");
1379 static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1380 struct sk_buff *skb)
1382 cb->command = cpu_to_le16(cb_iaaddr);
1383 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1387 static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1389 cb->command = cpu_to_le16(cb_dump);
1390 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1391 offsetof(struct mem, dump_buf));
1395 static int e100_phy_check_without_mii(struct nic *nic)
1400 phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
1403 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
1404 case I82503: /* Non-MII PHY; UNTESTED! */
1405 case S80C24: /* Non-MII PHY; tested and working */
1406 /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
1407 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
1408 * doesn't have a programming interface of any sort. The
1409 * media is sensed automatically based on how the link partner
1410 * is configured. This is, in essence, manual configuration.
1412 netif_info(nic, probe, nic->netdev,
1413 "found MII-less i82503 or 80c24 or other PHY\n");
1415 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1416 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
1418 /* these might be needed for certain MII-less cards...
1419 * nic->flags |= ich;
1420 * nic->flags |= ich_10h_workaround; */
1431 #define NCONFIG_AUTO_SWITCH 0x0080
1432 #define MII_NSC_CONG MII_RESV1
1433 #define NSC_CONG_ENABLE 0x0100
1434 #define NSC_CONG_TXREADY 0x0400
1435 #define ADVERTISE_FC_SUPPORTED 0x0400
1436 static int e100_phy_init(struct nic *nic)
1438 struct net_device *netdev = nic->netdev;
1440 u16 bmcr, stat, id_lo, id_hi, cong;
1442 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
1443 for (addr = 0; addr < 32; addr++) {
1444 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1445 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1446 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1447 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1448 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1452 /* uhoh, no PHY detected: check whether we seem to be some
1453 * weird, rare variant which is *known* to not have any MII.
1454 * But do this AFTER MII checking only, since this does
1455 * lookup of EEPROM values which may easily be unreliable. */
1456 if (e100_phy_check_without_mii(nic))
1457 return 0; /* simply return and hope for the best */
1459 /* for unknown cases log a fatal error */
1460 netif_err(nic, hw, nic->netdev,
1461 "Failed to locate any known PHY, aborting\n");
1465 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1466 "phy_addr = %d\n", nic->mii.phy_id);
1469 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1470 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1471 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1472 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1473 "phy ID = 0x%08X\n", nic->phy);
1475 /* Select the phy and isolate the rest */
1476 for (addr = 0; addr < 32; addr++) {
1477 if (addr != nic->mii.phy_id) {
1478 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1479 } else if (nic->phy != phy_82552_v) {
1480 bmcr = mdio_read(netdev, addr, MII_BMCR);
1481 mdio_write(netdev, addr, MII_BMCR,
1482 bmcr & ~BMCR_ISOLATE);
1486 * Workaround for 82552:
1487 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
1488 * other phy_id's) using bmcr value from addr discovery loop above.
1490 if (nic->phy == phy_82552_v)
1491 mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
1492 bmcr & ~BMCR_ISOLATE);
1494 /* Handle National tx phys */
1495 #define NCS_PHY_MODEL_MASK 0xFFF0FFFF
1496 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1497 /* Disable congestion control */
1498 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1499 cong |= NSC_CONG_TXREADY;
1500 cong &= ~NSC_CONG_ENABLE;
1501 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1504 if (nic->phy == phy_82552_v) {
1505 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
1507 /* assign special tweaked mdio_ctrl() function */
1508 nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
1510 /* Workaround Si not advertising flow-control during autoneg */
1511 advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1512 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
1514 /* Reset for the above changes to take effect */
1515 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1517 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
1518 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1519 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
1520 (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
1521 /* enable/disable MDI/MDI-X auto-switching. */
1522 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1523 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
1529 static int e100_hw_init(struct nic *nic)
1535 netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
1536 if (!in_interrupt() && (err = e100_self_test(nic)))
1539 if ((err = e100_phy_init(nic)))
1541 if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1543 if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1545 if ((err = e100_load_ucode_wait(nic)))
1547 if ((err = e100_exec_cb(nic, NULL, e100_configure)))
1549 if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1551 if ((err = e100_exec_cmd(nic, cuc_dump_addr,
1552 nic->dma_addr + offsetof(struct mem, stats))))
1554 if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1557 e100_disable_irq(nic);
1562 static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1564 struct net_device *netdev = nic->netdev;
1565 struct netdev_hw_addr *ha;
1566 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1568 cb->command = cpu_to_le16(cb_multi);
1569 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1571 netdev_for_each_mc_addr(ha, netdev) {
1574 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1580 static void e100_set_multicast_list(struct net_device *netdev)
1582 struct nic *nic = netdev_priv(netdev);
1584 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1585 "mc_count=%d, flags=0x%04X\n",
1586 netdev_mc_count(netdev), netdev->flags);
1588 if (netdev->flags & IFF_PROMISC)
1589 nic->flags |= promiscuous;
1591 nic->flags &= ~promiscuous;
1593 if (netdev->flags & IFF_ALLMULTI ||
1594 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1595 nic->flags |= multicast_all;
1597 nic->flags &= ~multicast_all;
1599 e100_exec_cb(nic, NULL, e100_configure);
1600 e100_exec_cb(nic, NULL, e100_multi);
1603 static void e100_update_stats(struct nic *nic)
1605 struct net_device *dev = nic->netdev;
1606 struct net_device_stats *ns = &dev->stats;
1607 struct stats *s = &nic->mem->stats;
1608 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1609 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
1612 /* Device's stats reporting may take several microseconds to
1613 * complete, so we're always waiting for results of the
1614 * previous command. */
1616 if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
1618 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1619 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1620 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1621 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1622 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1623 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1624 ns->collisions += nic->tx_collisions;
1625 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1626 le32_to_cpu(s->tx_lost_crs);
1627 nic->rx_short_frame_errors +=
1628 le32_to_cpu(s->rx_short_frame_errors);
1629 ns->rx_length_errors = nic->rx_short_frame_errors +
1630 nic->rx_over_length_errors;
1631 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1632 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1633 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1634 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1635 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1636 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1637 le32_to_cpu(s->rx_alignment_errors) +
1638 le32_to_cpu(s->rx_short_frame_errors) +
1639 le32_to_cpu(s->rx_cdt_errors);
1640 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1641 nic->tx_single_collisions +=
1642 le32_to_cpu(s->tx_single_collisions);
1643 nic->tx_multiple_collisions +=
1644 le32_to_cpu(s->tx_multiple_collisions);
1645 if (nic->mac >= mac_82558_D101_A4) {
1646 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1647 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1648 nic->rx_fc_unsupported +=
1649 le32_to_cpu(s->fc_rcv_unsupported);
1650 if (nic->mac >= mac_82559_D101M) {
1651 nic->tx_tco_frames +=
1652 le16_to_cpu(s->xmt_tco_frames);
1653 nic->rx_tco_frames +=
1654 le16_to_cpu(s->rcv_tco_frames);
1660 if (e100_exec_cmd(nic, cuc_dump_reset, 0))
1661 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1662 "exec cuc_dump_reset failed\n");
1665 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1667 /* Adjust inter-frame-spacing (IFS) between two transmits if
1668 * we're getting collisions on a half-duplex connection. */
1670 if (duplex == DUPLEX_HALF) {
1671 u32 prev = nic->adaptive_ifs;
1672 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1674 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1675 (nic->tx_frames > min_frames)) {
1676 if (nic->adaptive_ifs < 60)
1677 nic->adaptive_ifs += 5;
1678 } else if (nic->tx_frames < min_frames) {
1679 if (nic->adaptive_ifs >= 5)
1680 nic->adaptive_ifs -= 5;
1682 if (nic->adaptive_ifs != prev)
1683 e100_exec_cb(nic, NULL, e100_configure);
1687 static void e100_watchdog(struct timer_list *t)
1689 struct nic *nic = from_timer(nic, t, watchdog);
1690 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1693 netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1694 "right now = %ld\n", jiffies);
1696 /* mii library handles link maintenance tasks */
1698 mii_ethtool_gset(&nic->mii, &cmd);
1699 speed = ethtool_cmd_speed(&cmd);
1701 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1702 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
1703 speed == SPEED_100 ? 100 : 10,
1704 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1705 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1706 netdev_info(nic->netdev, "NIC Link is Down\n");
1709 mii_check_link(&nic->mii);
1711 /* Software generated interrupt to recover from (rare) Rx
1712 * allocation failure.
1713 * Unfortunately have to use a spinlock to not re-enable interrupts
1714 * accidentally, due to hardware that shares a register between the
1715 * interrupt mask bit and the SW Interrupt generation bit */
1716 spin_lock_irq(&nic->cmd_lock);
1717 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1718 e100_write_flush(nic);
1719 spin_unlock_irq(&nic->cmd_lock);
1721 e100_update_stats(nic);
1722 e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
1724 if (nic->mac <= mac_82557_D100_C)
1725 /* Issue a multicast command to workaround a 557 lock up */
1726 e100_set_multicast_list(nic->netdev);
1728 if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
1729 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1730 nic->flags |= ich_10h_workaround;
1732 nic->flags &= ~ich_10h_workaround;
1734 mod_timer(&nic->watchdog,
1735 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1738 static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1739 struct sk_buff *skb)
1741 dma_addr_t dma_addr;
1742 cb->command = nic->tx_command;
1744 dma_addr = pci_map_single(nic->pdev,
1745 skb->data, skb->len, PCI_DMA_TODEVICE);
1746 /* If we can't map the skb, have the upper layer try later */
1747 if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
1748 dev_kfree_skb_any(skb);
1754 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
1755 * testing, i.e. sending frames with bad CRC.
1757 if (unlikely(skb->no_fcs))
1758 cb->command |= cpu_to_le16(cb_tx_nc);
1760 cb->command &= ~cpu_to_le16(cb_tx_nc);
1762 /* interrupt every 16 packets regardless of delay */
1763 if ((nic->cbs_avail & ~15) == nic->cbs_avail)
1764 cb->command |= cpu_to_le16(cb_i);
1765 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1766 cb->u.tcb.tcb_byte_count = 0;
1767 cb->u.tcb.threshold = nic->tx_threshold;
1768 cb->u.tcb.tbd_count = 1;
1769 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
1770 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1771 skb_tx_timestamp(skb);
1775 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1776 struct net_device *netdev)
1778 struct nic *nic = netdev_priv(netdev);
1781 if (nic->flags & ich_10h_workaround) {
1782 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1783 Issue a NOP command followed by a 1us delay before
1784 issuing the Tx command. */
1785 if (e100_exec_cmd(nic, cuc_nop, 0))
1786 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1787 "exec cuc_nop failed\n");
1791 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1795 /* We queued the skb, but now we're out of space. */
1796 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1797 "No space for CB\n");
1798 netif_stop_queue(netdev);
1801 /* This is a hard error - log it. */
1802 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1803 "Out of Tx resources, returning skb\n");
1804 netif_stop_queue(netdev);
1805 return NETDEV_TX_BUSY;
1808 return NETDEV_TX_OK;
1811 static int e100_tx_clean(struct nic *nic)
1813 struct net_device *dev = nic->netdev;
1817 spin_lock(&nic->cb_lock);
1819 /* Clean CBs marked complete */
1820 for (cb = nic->cb_to_clean;
1821 cb->status & cpu_to_le16(cb_complete);
1822 cb = nic->cb_to_clean = cb->next) {
1823 dma_rmb(); /* read skb after status */
1824 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
1825 "cb[%d]->status = 0x%04X\n",
1826 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1829 if (likely(cb->skb != NULL)) {
1830 dev->stats.tx_packets++;
1831 dev->stats.tx_bytes += cb->skb->len;
1833 pci_unmap_single(nic->pdev,
1834 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1835 le16_to_cpu(cb->u.tcb.tbd.size),
1837 dev_kfree_skb_any(cb->skb);
1845 spin_unlock(&nic->cb_lock);
1847 /* Recover from running out of Tx resources in xmit_frame */
1848 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1849 netif_wake_queue(nic->netdev);
1854 static void e100_clean_cbs(struct nic *nic)
1857 while (nic->cbs_avail != nic->params.cbs.count) {
1858 struct cb *cb = nic->cb_to_clean;
1860 pci_unmap_single(nic->pdev,
1861 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1862 le16_to_cpu(cb->u.tcb.tbd.size),
1864 dev_kfree_skb(cb->skb);
1866 nic->cb_to_clean = nic->cb_to_clean->next;
1869 dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1873 nic->cuc_cmd = cuc_start;
1874 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1878 static int e100_alloc_cbs(struct nic *nic)
1881 unsigned int i, count = nic->params.cbs.count;
1883 nic->cuc_cmd = cuc_start;
1884 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1887 nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
1888 &nic->cbs_dma_addr);
1892 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1893 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1894 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1896 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1897 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1898 ((i+1) % count) * sizeof(struct cb));
1901 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1902 nic->cbs_avail = count;
1907 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1909 if (!nic->rxs) return;
1910 if (RU_SUSPENDED != nic->ru_running) return;
1912 /* handle init time starts */
1913 if (!rx) rx = nic->rxs;
1915 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1917 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1918 nic->ru_running = RU_RUNNING;
1922 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
1923 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1925 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1928 /* Init, and map the RFD. */
1929 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1930 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1931 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1933 if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1934 dev_kfree_skb_any(rx->skb);
1940 /* Link the RFD to end of RFA by linking previous RFD to
1941 * this one. We are safe to touch the previous RFD because
1942 * it is protected by the before last buffer's el bit being set */
1943 if (rx->prev->skb) {
1944 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1945 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1946 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1947 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1953 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1954 unsigned int *work_done, unsigned int work_to_do)
1956 struct net_device *dev = nic->netdev;
1957 struct sk_buff *skb = rx->skb;
1958 struct rfd *rfd = (struct rfd *)skb->data;
1959 u16 rfd_status, actual_size;
1962 if (unlikely(work_done && *work_done >= work_to_do))
1965 /* Need to sync before taking a peek at cb_complete bit */
1966 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1967 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1968 rfd_status = le16_to_cpu(rfd->status);
1970 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1971 "status=0x%04X\n", rfd_status);
1972 dma_rmb(); /* read size after status bit */
1974 /* If data isn't ready, nothing to indicate */
1975 if (unlikely(!(rfd_status & cb_complete))) {
1976 /* If the next buffer has the el bit, but we think the receiver
1977 * is still running, check to see if it really stopped while
1978 * we had interrupts off.
1979 * This allows for a fast restart without re-enabling
1981 if ((le16_to_cpu(rfd->command) & cb_el) &&
1982 (RU_RUNNING == nic->ru_running))
1984 if (ioread8(&nic->csr->scb.status) & rus_no_res)
1985 nic->ru_running = RU_SUSPENDED;
1986 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
1988 PCI_DMA_FROMDEVICE);
1992 /* Get actual data size */
1993 if (unlikely(dev->features & NETIF_F_RXFCS))
1995 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
1996 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1997 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
2000 pci_unmap_single(nic->pdev, rx->dma_addr,
2001 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2003 /* If this buffer has the el bit, but we think the receiver
2004 * is still running, check to see if it really stopped while
2005 * we had interrupts off.
2006 * This allows for a fast restart without re-enabling interrupts.
2007 * This can happen when the RU sees the size change but also sees
2008 * the el bit set. */
2009 if ((le16_to_cpu(rfd->command) & cb_el) &&
2010 (RU_RUNNING == nic->ru_running)) {
2012 if (ioread8(&nic->csr->scb.status) & rus_no_res)
2013 nic->ru_running = RU_SUSPENDED;
2016 /* Pull off the RFD and put the actual data (minus eth hdr) */
2017 skb_reserve(skb, sizeof(struct rfd));
2018 skb_put(skb, actual_size);
2019 skb->protocol = eth_type_trans(skb, nic->netdev);
	/* If we are receiving all frames, then don't bother
	 * checking for errors.
	 */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}
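/* e100_rx_clean - indicate completed receive frames, refill the RFA with new
 * skbs, move the el bit (and zero size) to the new before-last RFD, and
 * restart the receive unit if it stopped in the No Resources state. */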
2052 static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
2053 unsigned int work_to_do)
2056 int restart_required = 0, err = 0;
2057 struct rx *old_before_last_rx, *new_before_last_rx;
2058 struct rfd *old_before_last_rfd, *new_before_last_rfd;
2060 /* Indicate newly arrived packets */
2061 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
2062 err = e100_rx_indicate(nic, rx, work_done, work_to_do);
2063 /* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}
2069 /* On EAGAIN, hit quota so have more work to do, restart once
2070 * cleanup is complete.
	 * Else, if we are already in RNR, pay attention: this ensures that
2072 * the state machine progression never allows a start with a
2073 * partially cleaned list, avoiding a race between hardware
2074 * and rx_to_clean when in NAPI mode */
2075 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
2076 restart_required = 1;
2078 old_before_last_rx = nic->rx_to_use->prev->prev;
2079 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
2081 /* Alloc new skbs to refill list */
2082 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
2083 if (unlikely(e100_rx_alloc_skb(nic, rx)))
2084 break; /* Better luck next time (see watchdog) */
2087 new_before_last_rx = nic->rx_to_use->prev->prev;
2088 if (new_before_last_rx != old_before_last_rx) {
2089 /* Set the el-bit on the buffer that is before the last buffer.
2090 * This lets us update the next pointer on the last buffer
2091 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will raise an RNR interrupt and the RU
		 * will go into the No Resources state. It will not complete
		 * nor write to this buffer. */
2098 new_before_last_rfd =
2099 (struct rfd *)new_before_last_rx->skb->data;
2100 new_before_last_rfd->size = 0;
2101 new_before_last_rfd->command |= cpu_to_le16(cb_el);
2102 pci_dma_sync_single_for_device(nic->pdev,
2103 new_before_last_rx->dma_addr, sizeof(struct rfd),
2104 PCI_DMA_BIDIRECTIONAL);
2106 /* Now that we have a new stopping point, we can clear the old
2107 * stopping point. We must sync twice to get the proper
2108 * ordering on the hardware side of things. */
2109 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
2110 pci_dma_sync_single_for_device(nic->pdev,
2111 old_before_last_rx->dma_addr, sizeof(struct rfd),
2112 PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
2115 pci_dma_sync_single_for_device(nic->pdev,
2116 old_before_last_rx->dma_addr, sizeof(struct rfd),
2117 PCI_DMA_BIDIRECTIONAL);
2120 if (restart_required) {
2122 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
2123 e100_start_receiver(nic, nic->rx_to_clean);
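/* e100_rx_clean_list - unmap and free every receive skb and release the
 * rx ring itself; used on teardown and on allocation failure. */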
2129 static void e100_rx_clean_list(struct nic *nic)
2132 unsigned int i, count = nic->params.rfds.count;
2134 nic->ru_running = RU_UNINITIALIZED;
	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}
2148 nic->rx_to_use = nic->rx_to_clean = NULL;
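/* e100_rx_alloc_list - build the circular RFA: allocate the rx ring and an
 * skb for each entry, then mark the before-last RFD with the el bit and a
 * zero size so the hardware stops there until the ring is replenished. */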
2151 static int e100_rx_alloc_list(struct nic *nic)
2154 unsigned int i, count = nic->params.rfds.count;
2155 struct rfd *before_last;
2157 nic->rx_to_use = nic->rx_to_clean = NULL;
2158 nic->ru_running = RU_UNINITIALIZED;
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;
2163 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2164 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2165 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
2171 /* Set the el-bit on the buffer that is before the last buffer.
2172 * This lets us update the next pointer on the last buffer without
2173 * worrying about hardware touching it.
2174 * We set the size to 0 to prevent hardware from touching this buffer.
2175 * When the hardware hits the before last buffer with el-bit and size
2176 * of 0, it will RNR interrupt, the RU will go into the No Resources
2177 * state. It will not complete nor write to this buffer. */
2178 rx = nic->rxs->prev->prev;
2179 before_last = (struct rfd *)rx->skb->data;
2180 before_last->command |= cpu_to_le16(cb_el);
2181 before_last->size = 0;
2182 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2183 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2185 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2186 nic->ru_running = RU_SUSPENDED;
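/* e100_intr - interrupt handler: read and ack the SCB status, note an RNR
 * event so the receiver is restarted from the cleanup path, and hand the
 * rest of the work to NAPI with device interrupts disabled. */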
2191 static irqreturn_t e100_intr(int irq, void *dev_id)
2193 struct net_device *netdev = dev_id;
2194 struct nic *nic = netdev_priv(netdev);
2195 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2197 netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
2198 "stat_ack = 0x%02X\n", stat_ack);
	if (stat_ack == stat_ack_not_ours ||    /* Not our interrupt */
	    stat_ack == stat_ack_not_present)   /* Hardware is ejected */
		return IRQ_NONE;
2204 /* Ack interrupt(s) */
2205 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
2207 /* We hit Receive No Resource (RNR); restart RU after cleaning */
2208 if (stat_ack & stat_ack_rnr)
2209 nic->ru_running = RU_SUSPENDED;
2211 if (likely(napi_schedule_prep(&nic->napi))) {
2212 e100_disable_irq(nic);
2213 __napi_schedule(&nic->napi);
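/* e100_poll - NAPI poll routine: clean received frames and completed transmit
 * CBs, then re-enable interrupts once less than the full budget was used. */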
2219 static int e100_poll(struct napi_struct *napi, int budget)
2221 struct nic *nic = container_of(napi, struct nic, napi);
2222 unsigned int work_done = 0;
	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);
2227 /* If budget not fully consumed, exit the polling mode */
2228 if (work_done < budget) {
2229 napi_complete_done(napi, work_done);
2230 e100_enable_irq(nic);
2236 #ifdef CONFIG_NET_POLL_CONTROLLER
2237 static void e100_netpoll(struct net_device *netdev)
2239 struct nic *nic = netdev_priv(netdev);
2241 e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
2244 e100_enable_irq(nic);
2248 static int e100_set_mac_address(struct net_device *netdev, void *p)
2250 struct nic *nic = netdev_priv(netdev);
2251 struct sockaddr *addr = p;
2253 if (!is_valid_ether_addr(addr->sa_data))
2254 return -EADDRNOTAVAIL;
2256 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2257 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2262 static int e100_asf(struct nic *nic)
2264 /* ASF can be enabled from eeprom */
2265 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2266 (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
2267 !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
2268 ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
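/* e100_up - bring the interface up: allocate the Rx ring and CBs, initialize
 * the hardware, start the receiver and watchdog, request the IRQ, and only
 * then enable interrupts so they cannot race with napi_enable(). */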
2271 static int e100_up(struct nic *nic)
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2303 static void e100_down(struct nic *nic)
2305 /* wait here for poll to complete */
2306 napi_disable(&nic->napi);
2307 netif_stop_queue(nic->netdev);
2309 free_irq(nic->pdev->irq, nic->netdev);
2310 del_timer_sync(&nic->watchdog);
2311 netif_carrier_off(nic->netdev);
2312 e100_clean_cbs(nic);
2313 e100_rx_clean_list(nic);
2316 static void e100_tx_timeout(struct net_device *netdev)
2318 struct nic *nic = netdev_priv(netdev);
2320 /* Reset outside of interrupt context, to avoid request_irq
2321 * in interrupt context */
2322 schedule_work(&nic->tx_timeout_task);
2325 static void e100_tx_timeout_task(struct work_struct *work)
2327 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2328 struct net_device *netdev = nic->netdev;
2330 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2331 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2334 if (netif_running(netdev)) {
2335 e100_down(netdev_priv(netdev));
2336 e100_up(netdev_priv(netdev));
2341 static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2344 struct sk_buff *skb;
2346 /* Use driver resources to perform internal MAC or PHY
2347 * loopback test. A single packet is prepared and transmitted
2348 * in loopback mode, and the test passes if the received
2349 * packet compares byte-for-byte to the transmitted packet. */
2351 if ((err = e100_rx_alloc_list(nic)))
2353 if ((err = e100_alloc_cbs(nic)))
2356 /* ICH PHY loopback is broken so do MAC loopback instead */
2357 if (nic->flags & ich && loopback_mode == lb_phy)
2358 loopback_mode = lb_mac;
2360 nic->loopback = loopback_mode;
2361 if ((err = e100_hw_init(nic)))
2362 goto err_loopback_none;
2364 if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);
2368 e100_start_receiver(nic, NULL);
	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
2374 skb_put(skb, ETH_DATA_LEN);
2375 memset(skb->data, 0xFF, ETH_DATA_LEN);
2376 e100_xmit_frame(skb, nic->netdev);
2380 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2381 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2389 nic->loopback = lb_none;
2390 e100_clean_cbs(nic);
2393 e100_rx_clean_list(nic);
2397 #define MII_LED_CONTROL 0x1B
2398 #define E100_82552_LED_OVERRIDE 0x19
2399 #define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
2400 #define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
2402 static int e100_get_link_ksettings(struct net_device *netdev,
2403 struct ethtool_link_ksettings *cmd)
2405 struct nic *nic = netdev_priv(netdev);
2407 mii_ethtool_get_link_ksettings(&nic->mii, cmd);
2412 static int e100_set_link_ksettings(struct net_device *netdev,
2413 const struct ethtool_link_ksettings *cmd)
2415 struct nic *nic = netdev_priv(netdev);
2418 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2419 err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
2420 e100_exec_cb(nic, NULL, e100_configure);
2425 static void e100_get_drvinfo(struct net_device *netdev,
2426 struct ethtool_drvinfo *info)
2428 struct nic *nic = netdev_priv(netdev);
2429 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2430 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2431 strlcpy(info->bus_info, pci_name(nic->pdev),
2432 sizeof(info->bus_info));
2435 #define E100_PHY_REGS 0x1D
2436 static int e100_get_regs_len(struct net_device *netdev)
2438 struct nic *nic = netdev_priv(netdev);
2440 /* We know the number of registers, and the size of the dump buffer.
	 * Calculate the total size in bytes.
	 */
2443 return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
2446 static void e100_get_regs(struct net_device *netdev,
2447 struct ethtool_regs *regs, void *p)
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;
2453 regs->version = (1 << 24) | nic->pdev->revision;
2454 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2455 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2456 ioread16(&nic->csr->scb.status);
2457 for (i = 0; i < E100_PHY_REGS; i++)
2458 /* Note that we read the registers in reverse order. This
		 * ordering is the ABI apparently used by ethtool and other
		 * applications.
		 */
2462 buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
2463 E100_PHY_REGS - 1 - i);
2464 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2465 e100_exec_cb(nic, NULL, e100_dump);
2467 memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
2468 sizeof(nic->mem->dump_buf));
2471 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2473 struct nic *nic = netdev_priv(netdev);
2474 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2475 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2478 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2480 struct nic *nic = netdev_priv(netdev);
	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;
2491 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2493 e100_exec_cb(nic, NULL, e100_configure);
2498 static u32 e100_get_msglevel(struct net_device *netdev)
2500 struct nic *nic = netdev_priv(netdev);
2501 return nic->msg_enable;
2504 static void e100_set_msglevel(struct net_device *netdev, u32 value)
2506 struct nic *nic = netdev_priv(netdev);
2507 nic->msg_enable = value;
2510 static int e100_nway_reset(struct net_device *netdev)
2512 struct nic *nic = netdev_priv(netdev);
2513 return mii_nway_restart(&nic->mii);
2516 static u32 e100_get_link(struct net_device *netdev)
2518 struct nic *nic = netdev_priv(netdev);
2519 return mii_link_ok(&nic->mii);
2522 static int e100_get_eeprom_len(struct net_device *netdev)
2524 struct nic *nic = netdev_priv(netdev);
2525 return nic->eeprom_wc << 1;
2528 #define E100_EEPROM_MAGIC 0x1234
2529 static int e100_get_eeprom(struct net_device *netdev,
2530 struct ethtool_eeprom *eeprom, u8 *bytes)
2532 struct nic *nic = netdev_priv(netdev);
2534 eeprom->magic = E100_EEPROM_MAGIC;
2535 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2540 static int e100_set_eeprom(struct net_device *netdev,
2541 struct ethtool_eeprom *eeprom, u8 *bytes)
2543 struct nic *nic = netdev_priv(netdev);
2545 if (eeprom->magic != E100_EEPROM_MAGIC)
2548 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2550 return e100_eeprom_save(nic, eeprom->offset >> 1,
2551 (eeprom->len >> 1) + 1);
2554 static void e100_get_ringparam(struct net_device *netdev,
2555 struct ethtool_ringparam *ring)
2557 struct nic *nic = netdev_priv(netdev);
2558 struct param_range *rfds = &nic->params.rfds;
2559 struct param_range *cbs = &nic->params.cbs;
2561 ring->rx_max_pending = rfds->max;
2562 ring->tx_max_pending = cbs->max;
2563 ring->rx_pending = rfds->count;
2564 ring->tx_pending = cbs->count;
2567 static int e100_set_ringparam(struct net_device *netdev,
2568 struct ethtool_ringparam *ring)
2570 struct nic *nic = netdev_priv(netdev);
2571 struct param_range *rfds = &nic->params.rfds;
2572 struct param_range *cbs = &nic->params.cbs;
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
2579 rfds->count = max(ring->rx_pending, rfds->min);
2580 rfds->count = min(rfds->count, rfds->max);
2581 cbs->count = max(ring->tx_pending, cbs->min);
2582 cbs->count = min(cbs->count, cbs->max);
2583 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2584 rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
2591 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2592 "Link test (on/offline)",
2593 "Eeprom test (on/offline)",
2594 "Self test (offline)",
2595 "Mac loopback (offline)",
2596 "Phy loopback (offline)",
2598 #define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
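/* e100_diag_test - ethtool self-test: the link and EEPROM checks run online;
 * the offline tests (self test, MAC and PHY loopback) take a running
 * interface down, preserve the link settings, and restore both afterwards. */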
2600 static void e100_diag_test(struct net_device *netdev,
2601 struct ethtool_test *test, u64 *data)
2603 struct ethtool_cmd cmd;
2604 struct nic *nic = netdev_priv(netdev);
2607 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2608 data[0] = !mii_link_ok(&nic->mii);
2609 data[1] = e100_eeprom_load(nic);
2610 if (test->flags & ETH_TEST_FL_OFFLINE) {
2612 /* save speed, duplex & autoneg settings */
2613 err = mii_ethtool_gset(&nic->mii, &cmd);
		if (netif_running(netdev))
			e100_down(nic);
2617 data[2] = e100_self_test(nic);
2618 data[3] = e100_loopback_test(nic, lb_mac);
2619 data[4] = e100_loopback_test(nic, lb_phy);
2621 /* restore speed, duplex & autoneg settings */
2622 err = mii_ethtool_sset(&nic->mii, &cmd);
		if (netif_running(netdev))
			e100_up(nic);
	}
2627 for (i = 0; i < E100_TEST_LEN; i++)
2628 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2630 msleep_interruptible(4 * 1000);
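/* e100_set_phys_id - blink the port LEDs for ethtool identify by writing the
 * PHY LED override/control register (a different register on the 82552). */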
2633 static int e100_set_phys_id(struct net_device *netdev,
2634 enum ethtool_phys_id_state state)
2636 struct nic *nic = netdev_priv(netdev);
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		      MII_LED_CONTROL;
2648 case ETHTOOL_ID_ACTIVE:
2652 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
2653 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2656 case ETHTOOL_ID_OFF:
2657 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
2660 case ETHTOOL_ID_INACTIVE:
2664 mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
2668 static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2669 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2670 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2671 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2672 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2673 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2674 "tx_heartbeat_errors", "tx_window_errors",
2675 /* device-specific stats */
2676 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2677 "tx_flow_control_pause", "rx_flow_control_pause",
2678 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2679 "rx_short_frame_errors", "rx_over_length_errors",
2681 #define E100_NET_STATS_LEN 21
2682 #define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
2684 static int e100_get_sset_count(struct net_device *netdev, int sset)
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
2696 static void e100_get_ethtool_stats(struct net_device *netdev,
2697 struct ethtool_stats *stats, u64 *data)
2699 struct nic *nic = netdev_priv(netdev);
2702 for (i = 0; i < E100_NET_STATS_LEN; i++)
2703 data[i] = ((unsigned long *)&netdev->stats)[i];
2705 data[i++] = nic->tx_deferred;
2706 data[i++] = nic->tx_single_collisions;
2707 data[i++] = nic->tx_multiple_collisions;
2708 data[i++] = nic->tx_fc_pause;
2709 data[i++] = nic->rx_fc_pause;
2710 data[i++] = nic->rx_fc_unsupported;
2711 data[i++] = nic->tx_tco_frames;
2712 data[i++] = nic->rx_tco_frames;
2713 data[i++] = nic->rx_short_frame_errors;
2714 data[i++] = nic->rx_over_length_errors;
2717 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}
2729 static const struct ethtool_ops e100_ethtool_ops = {
2730 .get_drvinfo = e100_get_drvinfo,
2731 .get_regs_len = e100_get_regs_len,
2732 .get_regs = e100_get_regs,
2733 .get_wol = e100_get_wol,
2734 .set_wol = e100_set_wol,
2735 .get_msglevel = e100_get_msglevel,
2736 .set_msglevel = e100_set_msglevel,
2737 .nway_reset = e100_nway_reset,
2738 .get_link = e100_get_link,
2739 .get_eeprom_len = e100_get_eeprom_len,
2740 .get_eeprom = e100_get_eeprom,
2741 .set_eeprom = e100_set_eeprom,
2742 .get_ringparam = e100_get_ringparam,
2743 .set_ringparam = e100_set_ringparam,
2744 .self_test = e100_diag_test,
2745 .get_strings = e100_get_strings,
2746 .set_phys_id = e100_set_phys_id,
2747 .get_ethtool_stats = e100_get_ethtool_stats,
2748 .get_sset_count = e100_get_sset_count,
2749 .get_ts_info = ethtool_op_get_ts_info,
2750 .get_link_ksettings = e100_get_link_ksettings,
2751 .set_link_ksettings = e100_set_link_ksettings,
2754 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2756 struct nic *nic = netdev_priv(netdev);
2758 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2761 static int e100_alloc(struct nic *nic)
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
					&nic->dma_addr);
2765 return nic->mem ? 0 : -ENOMEM;
2768 static void e100_free(struct nic *nic)
2771 pci_free_consistent(nic->pdev, sizeof(struct mem),
2772 nic->mem, nic->dma_addr);
2777 static int e100_open(struct net_device *netdev)
2779 struct nic *nic = netdev_priv(netdev);
2782 netif_carrier_off(netdev);
2783 if ((err = e100_up(nic)))
2784 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2788 static int e100_close(struct net_device *netdev)
2790 e100_down(netdev_priv(netdev));
2794 static int e100_set_features(struct net_device *netdev,
2795 netdev_features_t features)
2797 struct nic *nic = netdev_priv(netdev);
2798 netdev_features_t changed = features ^ netdev->features;
	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;
2803 netdev->features = features;
2804 e100_exec_cb(nic, NULL, e100_configure);
2808 static const struct net_device_ops e100_netdev_ops = {
2809 .ndo_open = e100_open,
2810 .ndo_stop = e100_close,
2811 .ndo_start_xmit = e100_xmit_frame,
2812 .ndo_validate_addr = eth_validate_addr,
2813 .ndo_set_rx_mode = e100_set_multicast_list,
2814 .ndo_set_mac_address = e100_set_mac_address,
2815 .ndo_do_ioctl = e100_do_ioctl,
2816 .ndo_tx_timeout = e100_tx_timeout,
2817 #ifdef CONFIG_NET_POLL_CONTROLLER
2818 .ndo_poll_controller = e100_netpoll,
2820 .ndo_set_features = e100_set_features,
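/* e100_probe - PCI probe: allocate the netdev, map the CSR (memory- or
 * I/O-mapped), reset the part, load the EEPROM and MAC address, and register
 * the interface along with its CB DMA pool. */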
2823 static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2825 struct net_device *netdev;
	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;
2832 netdev->hw_features |= NETIF_F_RXFCS;
2833 netdev->priv_flags |= IFF_SUPP_NOFCS;
2834 netdev->hw_features |= NETIF_F_RXALL;
2836 netdev->netdev_ops = &e100_netdev_ops;
2837 netdev->ethtool_ops = &e100_ethtool_ops;
2838 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2839 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2841 nic = netdev_priv(netdev);
2842 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2843 nic->netdev = netdev;
2845 nic->msg_enable = (1 << debug) - 1;
2846 nic->mdio_ctrl = mdio_ctrl_hw;
2847 pci_set_drvdata(pdev, netdev);
2849 if ((err = pci_enable_device(pdev))) {
2850 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
2851 goto err_out_free_dev;
2854 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2855 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
2857 goto err_out_disable_pdev;
2860 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2861 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
2862 goto err_out_disable_pdev;
2865 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2866 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
2867 goto err_out_free_res;
2870 SET_NETDEV_DEV(netdev, &pdev->dev);
	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}
	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;
2887 e100_get_defaults(nic);
2889 /* D100 MAC doesn't allow rx of vlan packets with normal MTU */
2890 if (nic->mac < mac_82558_D101_A4)
2891 netdev->features |= NETIF_F_VLAN_CHALLENGED;
2893 /* locks must be initialized before calling hw_reset */
2894 spin_lock_init(&nic->cb_lock);
2895 spin_lock_init(&nic->cmd_lock);
2896 spin_lock_init(&nic->mdio_lock);
2898 /* Reset the device before pci_set_master() in case device is in some
2899 * funky state and has an interrupt pending - hint: we don't have the
2900 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);
2905 timer_setup(&nic->watchdog, e100_watchdog, 0);
2907 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2909 if ((err = e100_alloc(nic))) {
2910 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
2911 goto err_out_iounmap;
	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;
2919 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2920 if (!is_valid_ether_addr(netdev->dev_addr)) {
2921 if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}
2930 /* Wol magic packet can be enabled from eeprom */
2931 if ((nic->mac >= mac_82558_D101_A4) &&
2932 (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
2933 nic->flags |= wol_magic;
2934 device_set_wakeup_enable(&pdev->dev, true);
2937 /* ack any pending wake events, disable PME */
2938 pci_pme_active(pdev, false);
2940 strcpy(netdev->name, "eth%d");
2941 if ((err = register_netdev(netdev))) {
2942 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
	nic->cbs_pool = dma_pool_create(netdev->name,
			   &nic->pdev->dev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
2950 if (!nic->cbs_pool) {
2951 netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
2955 netif_info(nic, probe, nic->netdev,
2956 "addr 0x%llx, irq %d, MAC addr %pM\n",
2957 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2958 pdev->irq, netdev->dev_addr);
	return 0;

err_out_pool:
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	free_netdev(netdev);
	return err;
}
2977 static void e100_remove(struct pci_dev *pdev)
2979 struct net_device *netdev = pci_get_drvdata(pdev);
	if (netdev) {
		struct nic *nic = netdev_priv(netdev);

		unregister_netdev(netdev);
		e100_free(nic);
2985 pci_iounmap(pdev, nic->csr);
2986 dma_pool_destroy(nic->cbs_pool);
2987 free_netdev(netdev);
2988 pci_release_regions(pdev);
2989 pci_disable_device(pdev);
2993 #define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
2994 #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
2995 #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
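/* __e100_shutdown - common suspend/shutdown path: stop the interface, save
 * PCI state, and report whether wake should stay armed (WoL magic packet or
 * ASF); on the 82552 PHY, reverse auto-negotiation is also enabled before
 * sleeping. */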
2996 static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
2998 struct net_device *netdev = pci_get_drvdata(pdev);
2999 struct nic *nic = netdev_priv(netdev);
	if (netif_running(netdev))
		e100_down(nic);
3003 netif_device_detach(netdev);
3005 pci_save_state(pdev);
3007 if ((nic->flags & wol_magic) | e100_asf(nic)) {
3008 /* enable reverse auto-negotiation */
3009 if (nic->phy == phy_82552_v) {
3010 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3011 E100_82552_SMARTSPEED);
3013 mdio_write(netdev, nic->mii.phy_id,
3014 E100_82552_SMARTSPEED, smartspeed |
3015 E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}
3022 pci_clear_master(pdev);
3025 static int __e100_power_off(struct pci_dev *pdev, bool wake)
	if (wake)
		return pci_prepare_to_sleep(pdev);
3030 pci_wake_from_d3(pdev, false);
3031 pci_set_power_state(pdev, PCI_D3hot);
3037 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
	bool wake;

	__e100_shutdown(pdev, &wake);
3041 return __e100_power_off(pdev, wake);
3044 static int e100_resume(struct pci_dev *pdev)
3046 struct net_device *netdev = pci_get_drvdata(pdev);
3047 struct nic *nic = netdev_priv(netdev);
3049 pci_set_power_state(pdev, PCI_D0);
3050 pci_restore_state(pdev);
3051 /* ack any pending wake events, disable PME */
3052 pci_enable_wake(pdev, PCI_D0, 0);
3054 /* disable reverse auto-negotiation */
3055 if (nic->phy == phy_82552_v) {
3056 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3057 E100_82552_SMARTSPEED);
3059 mdio_write(netdev, nic->mii.phy_id,
3060 E100_82552_SMARTSPEED,
3061 smartspeed & ~(E100_82552_REV_ANEG));
3064 netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);
3070 #endif /* CONFIG_PM */
3072 static void e100_shutdown(struct pci_dev *pdev)
	bool wake;

	__e100_shutdown(pdev, &wake);
3076 if (system_state == SYSTEM_POWER_OFF)
3077 __e100_power_off(pdev, wake);
3080 /* ------------------ PCI Error Recovery infrastructure -------------- */
3082 * e100_io_error_detected - called when PCI error is detected.
3083 * @pdev: Pointer to PCI device
3084 * @state: The current pci connection state
3086 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3088 struct net_device *netdev = pci_get_drvdata(pdev);
3089 struct nic *nic = netdev_priv(netdev);
3091 netif_device_detach(netdev);
3093 if (state == pci_channel_io_perm_failure)
3094 return PCI_ERS_RESULT_DISCONNECT;
	if (netif_running(netdev))
		e100_down(nic);
3098 pci_disable_device(pdev);
3100 /* Request a slot reset. */
3101 return PCI_ERS_RESULT_NEED_RESET;
3105 * e100_io_slot_reset - called after the pci bus has been reset.
3106 * @pdev: Pointer to PCI device
3108 * Restart the card from scratch.
3110 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
3112 struct net_device *netdev = pci_get_drvdata(pdev);
3113 struct nic *nic = netdev_priv(netdev);
3115 if (pci_enable_device(pdev)) {
3116 pr_err("Cannot re-enable PCI device after reset\n");
3117 return PCI_ERS_RESULT_DISCONNECT;
3119 pci_set_master(pdev);
3121 /* Only one device per card can do a reset */
3122 if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);
3127 return PCI_ERS_RESULT_RECOVERED;
3131 * e100_io_resume - resume normal operations
3132 * @pdev: Pointer to PCI device
3134 * Resume normal operations after an error recovery
3135 * sequence has been completed.
3137 static void e100_io_resume(struct pci_dev *pdev)
3139 struct net_device *netdev = pci_get_drvdata(pdev);
3140 struct nic *nic = netdev_priv(netdev);
3142 /* ack any pending wake events, disable PME */
3143 pci_enable_wake(pdev, PCI_D0, 0);
3145 netif_device_attach(netdev);
3146 if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}
3152 static const struct pci_error_handlers e100_err_handler = {
3153 .error_detected = e100_io_error_detected,
3154 .slot_reset = e100_io_slot_reset,
3155 .resume = e100_io_resume,
3158 static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
3161 .probe = e100_probe,
3162 .remove = e100_remove,
3164 /* Power Management hooks */
3165 .suspend = e100_suspend,
3166 .resume = e100_resume,
3168 .shutdown = e100_shutdown,
3169 .err_handler = &e100_err_handler,
3172 static int __init e100_init_module(void)
3174 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3175 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3176 pr_info("%s\n", DRV_COPYRIGHT);
3178 return pci_register_driver(&e100_driver);
3181 static void __exit e100_cleanup_module(void)
3183 pci_unregister_driver(&e100_driver);
3186 module_init(e100_init_module);
3187 module_exit(e100_cleanup_module);