1 /*******************************************************************************
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
30 * e100.c: Intel(R) PRO/100 ethernet driver
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
46 * The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at a 33 MHz PCI clock rate.
55 * II. Driver Operation
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
64 * 8255x is highly MII-compliant and all accesses to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issuing a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB cleanup
90 * happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of the number of free CB resources available.
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
109 * In order to keep updates to the RFD link field from colliding with
110 * hardware writes to mark packets complete, we use the feature that
111 * hardware will not write to a size 0 descriptor and mark the previous
112 * packet as end-of-list (EL). After updating the link, we remove EL
113 * and only then restore the size such that hardware may use the
114 * previous-to-end RFD.
116 * Under typical operation, the receive unit (RU) is started once,
117 * and the controller happily fills RFDs as frames arrive. If
118 * replacement RFDs cannot be allocated, or the RU goes non-active,
119 * the RU must be restarted. Frame arrival generates an interrupt,
120 * and Rx indication and re-allocation happen in the same context,
121 * therefore no locking is required. A software-generated interrupt
122 * is issued from the watchdog to recover from a failed allocation
123 * scenario where all Rx resources have been indicated and none replaced.
128 * VLAN offloading of tagging, stripping and filtering is not
129 * supported, but the driver will accommodate the extra 4-byte VLAN tag
130 * for processing by upper layers. Tx/Rx Checksum offloading is not
131 * supported. Tx Scatter/Gather is not supported. Jumbo Frames are
132 * not supported (hardware limitation).
134 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
136 * Thanks to JC (jchapman@katalix.com) for helping with
137 * testing/troubleshooting the development driver.
140 * o several entry points race with dev->close
141 * o check for tx-no-resources/stop Q races with tx clean/wake Q
144 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
145 * - Stratus87247: protect MDI control register manipulations
146 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
150 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
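/* Descriptive note: with this pr_fmt, the pr_<level>() helpers used in this
 * file prefix every message with "e100: " (KBUILD_MODNAME for this module),
 * e.g. pr_info("hello\n") prints "e100: hello". */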
152 #include <linux/hardirq.h>
153 #include <linux/interrupt.h>
154 #include <linux/module.h>
155 #include <linux/moduleparam.h>
156 #include <linux/kernel.h>
157 #include <linux/types.h>
158 #include <linux/sched.h>
159 #include <linux/slab.h>
160 #include <linux/delay.h>
161 #include <linux/init.h>
162 #include <linux/pci.h>
163 #include <linux/dma-mapping.h>
164 #include <linux/dmapool.h>
165 #include <linux/netdevice.h>
166 #include <linux/etherdevice.h>
167 #include <linux/mii.h>
168 #include <linux/if_vlan.h>
169 #include <linux/skbuff.h>
170 #include <linux/ethtool.h>
171 #include <linux/string.h>
172 #include <linux/firmware.h>
173 #include <linux/rtnetlink.h>
174 #include <asm/unaligned.h>
177 #define DRV_NAME "e100"
178 #define DRV_EXT "-NAPI"
179 #define DRV_VERSION "3.5.24-k2" DRV_EXT
180 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
181 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
183 #define E100_WATCHDOG_PERIOD (2 * HZ)
184 #define E100_NAPI_WEIGHT 16
186 #define FIRMWARE_D101M "/*(DEBLOBBED)*/"
187 #define FIRMWARE_D101S "/*(DEBLOBBED)*/"
188 #define FIRMWARE_D102E "/*(DEBLOBBED)*/"
190 MODULE_DESCRIPTION(DRV_DESCRIPTION);
191 MODULE_AUTHOR(DRV_COPYRIGHT);
192 MODULE_LICENSE("GPL");
193 MODULE_VERSION(DRV_VERSION);
196 static int debug = 3;
197 static int eeprom_bad_csum_allow = 0;
198 static int use_io = 0;
199 module_param(debug, int, 0);
200 module_param(eeprom_bad_csum_allow, int, 0);
201 module_param(use_io, int, 0);
202 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
203 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
204 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
206 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
207 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
208 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
209 static const struct pci_device_id e100_id_table[] = {
210 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
211 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
212 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
213 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
214 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
215 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
216 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
217 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
218 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
219 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
220 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
221 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
222 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
223 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
224 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
225 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
226 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
227 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
228 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
229 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
230 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
231 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
232 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
233 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
234 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
235 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
236 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
237 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
238 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
239 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
240 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
241 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
242 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
243 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
244 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
245 INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
246 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
247 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
248 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
249 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
250 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
251 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
254 MODULE_DEVICE_TABLE(pci, e100_id_table);
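/* Illustrative sketch (not part of the original driver): what one entry of
 * the table above expands to, using the 0x1229 / ich=0 entry as the example.
 * The name e100_id_example is hypothetical.  The trailing "ich" hint lands
 * in .driver_data, where the probe code can pick it up later.
 */
static const struct pci_device_id e100_id_example __maybe_unused = {
	PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID,
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, 0 /* ich */
};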
257 mac_82557_D100_A = 0,
258 mac_82557_D100_B = 1,
259 mac_82557_D100_C = 2,
260 mac_82558_D101_A4 = 4,
261 mac_82558_D101_B0 = 5,
265 mac_82550_D102_C = 13,
273 phy_100a = 0x000003E0,
274 phy_100c = 0x035002A8,
275 phy_82555_tx = 0x015002A8,
276 phy_nsc_tx = 0x5C002000,
277 phy_82562_et = 0x033002A8,
278 phy_82562_em = 0x032002A8,
279 phy_82562_ek = 0x031002A8,
280 phy_82562_eh = 0x017002A8,
281 phy_82552_v = 0xd061004d,
282 phy_unknown = 0xFFFFFFFF,
285 /* CSR (Control/Status Registers) */
311 RU_UNINITIALIZED = -1,
315 stat_ack_not_ours = 0x00,
316 stat_ack_sw_gen = 0x04,
318 stat_ack_cu_idle = 0x20,
319 stat_ack_frame_rx = 0x40,
320 stat_ack_cu_cmd_done = 0x80,
321 stat_ack_not_present = 0xFF,
322 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
323 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
327 irq_mask_none = 0x00,
335 ruc_load_base = 0x06,
338 cuc_dump_addr = 0x40,
339 cuc_dump_stats = 0x50,
340 cuc_load_base = 0x60,
341 cuc_dump_reset = 0x70,
345 cuc_dump_complete = 0x0000A005,
346 cuc_dump_reset_complete = 0x0000A007,
350 software_reset = 0x0000,
352 selective_reset = 0x0002,
355 enum eeprom_ctrl_lo {
363 mdi_write = 0x04000000,
364 mdi_read = 0x08000000,
365 mdi_ready = 0x10000000,
375 enum eeprom_offsets {
376 eeprom_cnfg_mdix = 0x03,
377 eeprom_phy_iface = 0x06,
379 eeprom_config_asf = 0x0D,
380 eeprom_smbus_addr = 0x90,
383 enum eeprom_cnfg_mdix {
384 eeprom_mdix_enabled = 0x0080,
387 enum eeprom_phy_iface {
400 eeprom_id_wol = 0x0020,
403 enum eeprom_config_asf {
409 cb_complete = 0x8000,
414 * cb_command - Command Block flags
415 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
443 struct rx *next, *prev;
448 #if defined(__BIG_ENDIAN_BITFIELD)
454 /*0*/ u8 X(byte_count:6, pad0:2);
455 /*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
456 /*2*/ u8 adaptive_ifs;
457 /*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
458 term_write_cache_line:1), pad3:4);
459 /*4*/ u8 X(rx_dma_max_count:7, pad4:1);
460 /*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
461 /*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
462 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
463 rx_save_overruns : 1), rx_save_bad_frames : 1);
464 /*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
465 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
467 /*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
468 /*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
469 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
470 /*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
472 /*11*/ u8 X(linear_priority:3, pad11:5);
473 /*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
474 /*13*/ u8 ip_addr_lo;
475 /*14*/ u8 ip_addr_hi;
476 /*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
477 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
478 pad15_2:1), crs_or_cdt:1);
479 /*16*/ u8 fc_delay_lo;
480 /*17*/ u8 fc_delay_hi;
481 /*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
482 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
483 /*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
484 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
485 full_duplex_force:1), full_duplex_pin:1);
486 /*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
487 /*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
488 /*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
492 #define E100_MAX_MULTICAST_ADDRS 64
495 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
498 /* Important: keep total struct u32-aligned */
499 #define UCODE_SIZE 134
506 __le32 ucode[UCODE_SIZE];
507 struct config config;
520 __le32 dump_buffer_addr;
522 struct cb *next, *prev;
528 lb_none = 0, lb_mac = 1, lb_phy = 3,
532 __le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
533 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
534 tx_multiple_collisions, tx_total_collisions;
535 __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
536 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
537 rx_short_frame_errors;
538 __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
539 __le16 xmt_tco_frames, rcv_tco_frames;
559 struct param_range rfds;
560 struct param_range cbs;
564 /* Begin: frequently used values: keep adjacent for cache effect */
565 u32 msg_enable ____cacheline_aligned;
566 struct net_device *netdev;
567 struct pci_dev *pdev;
568 u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
570 struct rx *rxs ____cacheline_aligned;
571 struct rx *rx_to_use;
572 struct rx *rx_to_clean;
573 struct rfd blank_rfd;
574 enum ru_state ru_running;
576 spinlock_t cb_lock ____cacheline_aligned;
578 struct csr __iomem *csr;
579 enum scb_cmd_lo cuc_cmd;
580 unsigned int cbs_avail;
581 struct napi_struct napi;
583 struct cb *cb_to_use;
584 struct cb *cb_to_send;
585 struct cb *cb_to_clean;
587 /* End: frequently used values: keep adjacent for cache effect */
591 promiscuous = (1 << 1),
592 multicast_all = (1 << 2),
593 wol_magic = (1 << 3),
594 ich_10h_workaround = (1 << 4),
595 } flags ____cacheline_aligned;
599 struct params params;
600 struct timer_list watchdog;
601 struct mii_if_info mii;
602 struct work_struct tx_timeout_task;
603 enum loopback loopback;
608 struct pci_pool *cbs_pool;
609 dma_addr_t cbs_dma_addr;
615 u32 tx_single_collisions;
616 u32 tx_multiple_collisions;
621 u32 rx_fc_unsupported;
623 u32 rx_short_frame_errors;
624 u32 rx_over_length_errors;
628 spinlock_t mdio_lock;
629 const struct firmware *fw;
632 static inline void e100_write_flush(struct nic *nic)
634 /* Flush previous PCI writes through intermediate bridges
635 * by doing a benign read */
636 (void)ioread8(&nic->csr->scb.status);
639 static void e100_enable_irq(struct nic *nic)
643 spin_lock_irqsave(&nic->cmd_lock, flags);
644 iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
645 e100_write_flush(nic);
646 spin_unlock_irqrestore(&nic->cmd_lock, flags);
649 static void e100_disable_irq(struct nic *nic)
653 spin_lock_irqsave(&nic->cmd_lock, flags);
654 iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
655 e100_write_flush(nic);
656 spin_unlock_irqrestore(&nic->cmd_lock, flags);
659 static void e100_hw_reset(struct nic *nic)
661 /* Put CU and RU into idle with a selective reset to get
662 * the device off of the PCI bus */
663 iowrite32(selective_reset, &nic->csr->port);
664 e100_write_flush(nic); udelay(20);
666 /* Now fully reset device */
667 iowrite32(software_reset, &nic->csr->port);
668 e100_write_flush(nic); udelay(20);
670 /* Mask off our interrupt line - it's unmasked after reset */
671 e100_disable_irq(nic);
674 static int e100_self_test(struct nic *nic)
676 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
678 /* Passing the self-test is a pretty good indication
679 * that the device can DMA to/from host memory */
681 nic->mem->selftest.signature = 0;
682 nic->mem->selftest.result = 0xFFFFFFFF;
684 iowrite32(selftest | dma_addr, &nic->csr->port);
685 e100_write_flush(nic);
686 /* Wait 10 msec for self-test to complete */
689 /* Interrupts are enabled after self-test */
690 e100_disable_irq(nic);
692 /* Check results of self-test */
693 if (nic->mem->selftest.result != 0) {
694 netif_err(nic, hw, nic->netdev,
695 "Self-test failed: result=0x%08X\n",
696 nic->mem->selftest.result);
699 if (nic->mem->selftest.signature == 0) {
700 netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
707 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
709 u32 cmd_addr_data[3];
713 /* Three cmds: write/erase enable, write data, write/erase disable */
714 cmd_addr_data[0] = op_ewen << (addr_len - 2);
715 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
717 cmd_addr_data[2] = op_ewds << (addr_len - 2);
719 /* Bit-bang cmds to write word to eeprom */
720 for (j = 0; j < 3; j++) {
723 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
724 e100_write_flush(nic); udelay(4);
726 for (i = 31; i >= 0; i--) {
727 ctrl = (cmd_addr_data[j] & (1 << i)) ?
729 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
730 e100_write_flush(nic); udelay(4);
732 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
733 e100_write_flush(nic); udelay(4);
735 /* Wait 10 msec for cmd to complete */
739 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
740 e100_write_flush(nic); udelay(4);
744 /* General technique stolen from the eepro100 driver - very clever */
745 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
752 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
755 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
756 e100_write_flush(nic); udelay(4);
758 /* Bit-bang to read word from eeprom */
759 for (i = 31; i >= 0; i--) {
760 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
761 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
762 e100_write_flush(nic); udelay(4);
764 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
765 e100_write_flush(nic); udelay(4);
767 /* The EEPROM drives a dummy zero to EEDO after receiving
768 * the complete address. Use this to adjust addr_len. */
769 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
770 if (!(ctrl & eedo) && i > 16) {
771 *addr_len -= (i - 16);
775 data = (data << 1) | (ctrl & eedo ? 1 : 0);
779 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
780 e100_write_flush(nic); udelay(4);
782 return cpu_to_le16(data);
785 /* Load entire EEPROM image into driver cache and validate checksum */
786 static int e100_eeprom_load(struct nic *nic)
788 u16 addr, addr_len = 8, checksum = 0;
790 /* Try reading with an 8-bit addr len to discover actual addr len */
791 e100_eeprom_read(nic, &addr_len, 0);
792 nic->eeprom_wc = 1 << addr_len;
794 for (addr = 0; addr < nic->eeprom_wc; addr++) {
795 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
796 if (addr < nic->eeprom_wc - 1)
797 checksum += le16_to_cpu(nic->eeprom[addr]);
800 /* The checksum, stored in the last word, is calculated such that
801 * the sum of words should be 0xBABA */
802 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
803 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
804 if (!eeprom_bad_csum_allow)
811 /* Save (portion of) driver EEPROM cache to device and update checksum */
812 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
814 u16 addr, addr_len = 8, checksum = 0;
816 /* Try reading with an 8-bit addr len to discover actual addr len */
817 e100_eeprom_read(nic, &addr_len, 0);
818 nic->eeprom_wc = 1 << addr_len;
820 if (start + count >= nic->eeprom_wc)
823 for (addr = start; addr < start + count; addr++)
824 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
826 /* The checksum, stored in the last word, is calculated such that
827 * the sum of words should be 0xBABA */
828 for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
829 checksum += le16_to_cpu(nic->eeprom[addr]);
830 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
831 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
832 nic->eeprom[nic->eeprom_wc - 1]);
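/* Illustrative sketch (not called anywhere in the driver; the helper name is
 * hypothetical): the EEPROM checksum rule used above, in isolation.  The last
 * word is chosen so that the 16-bit sum of all words, including the checksum
 * word itself, equals 0xBABA.
 */
static bool __maybe_unused e100_eeprom_csum_ok(const __le16 *eeprom, u16 words)
{
	u16 addr, sum = 0;

	for (addr = 0; addr < words; addr++)
		sum += le16_to_cpu(eeprom[addr]);	/* wraps modulo 2^16 */

	return sum == 0xBABA;
}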
837 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
838 #define E100_WAIT_SCB_FAST 20 /* delay like the old code */
839 static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
845 spin_lock_irqsave(&nic->cmd_lock, flags);
847 /* Previous command is accepted when SCB clears */
848 for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
849 if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
852 if (unlikely(i > E100_WAIT_SCB_FAST))
855 if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
860 if (unlikely(cmd != cuc_resume))
861 iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
862 iowrite8(cmd, &nic->csr->scb.cmd_lo);
865 spin_unlock_irqrestore(&nic->cmd_lock, flags);
870 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
871 int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
877 spin_lock_irqsave(&nic->cb_lock, flags);
879 if (unlikely(!nic->cbs_avail)) {
885 nic->cb_to_use = cb->next;
889 err = cb_prepare(nic, cb, skb);
893 if (unlikely(!nic->cbs_avail))
897 /* Order is important, otherwise we'll be in a race with h/w:
898 * set S-bit in current first, then clear S-bit in previous. */
899 cb->command |= cpu_to_le16(cb_s);
901 cb->prev->command &= cpu_to_le16(~cb_s);
903 while (nic->cb_to_send != nic->cb_to_use) {
904 if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
905 nic->cb_to_send->dma_addr))) {
906 /* Ok, here's where things get sticky. It's
907 * possible that we can't schedule the command
908 * because the controller is too busy, so
909 * let's just queue the command and try again
910 * when another command is scheduled. */
911 if (err == -ENOSPC) {
913 schedule_work(&nic->tx_timeout_task);
917 nic->cuc_cmd = cuc_resume;
918 nic->cb_to_send = nic->cb_to_send->next;
923 spin_unlock_irqrestore(&nic->cb_lock, flags);
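/* Illustrative sketch (not part of the driver; helper name and barrier choice
 * are the sketch's own): the chaining rule from the comment above, spelled
 * out.  The suspend (S) bit is set on the newly queued CB before it is
 * cleared on the previous CB, so the CU can never run past the end of the
 * list into a half-built command; the barrier keeps the two stores ordered
 * as seen by the device.
 */
static void __maybe_unused e100_chain_cb_sketch(struct cb *tail, struct cb *prev)
{
	tail->command |= cpu_to_le16(cb_s);	/* suspend at the new tail    */
	dma_wmb();				/* device must see this first */
	prev->command &= cpu_to_le16(~cb_s);	/* let the CU move past prev  */
}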
928 static int mdio_read(struct net_device *netdev, int addr, int reg)
930 struct nic *nic = netdev_priv(netdev);
931 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
934 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
936 struct nic *nic = netdev_priv(netdev);
938 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
941 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
942 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
950 * Stratus87247: we shouldn't be writing the MDI control
951 * register until the Ready bit shows True. Also, since
952 * manipulation of the MDI control registers is a multi-step
953 * procedure, it should be done under lock.
955 spin_lock_irqsave(&nic->mdio_lock, flags);
956 for (i = 100; i; --i) {
957 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
962 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
963 spin_unlock_irqrestore(&nic->mdio_lock, flags);
964 return 0; /* No way to indicate timeout error */
966 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
968 for (i = 0; i < 100; i++) {
970 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
973 spin_unlock_irqrestore(&nic->mdio_lock, flags);
974 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
975 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
976 dir == mdi_read ? "READ" : "WRITE",
977 addr, reg, data, data_out);
978 return (u16)data_out;
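/* Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the layout of the MDI control word written by
 * mdio_ctrl_hw() above, with the field positions spelled out.  "dir" is
 * mdi_read or mdi_write from the enum earlier in this file.
 */
static u32 __maybe_unused e100_mdi_ctrl_word(u32 addr, u32 dir, u32 reg, u16 data)
{
	return (reg  << 16) |	/* bits 20:16 - PHY register number    */
	       (addr << 21) |	/* bits 25:21 - PHY address            */
	       dir          |	/* bits 27:26 - opcode (read or write) */
	       data;		/* bits 15:0  - data, for writes only  */
}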
981 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
982 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
988 if ((reg == MII_BMCR) && (dir == mdi_write)) {
989 if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
990 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
994 * Work around a silicon issue where sometimes the part will not
995 * autoneg to 100Mbps even when advertised.
997 if (advert & ADVERTISE_100FULL)
998 data |= BMCR_SPEED100 | BMCR_FULLDPLX;
999 else if (advert & ADVERTISE_100HALF)
1000 data |= BMCR_SPEED100;
1003 return mdio_ctrl_hw(nic, addr, dir, reg, data);
1006 /* Fully software-emulated mdio_ctrl() function for cards without
1007 * MII-compliant PHYs.
1008 * For now, this is mainly geared towards 80c24 support; in case of further
1009 * requirements for other types (i82503, ...?) either extend this mechanism
1010 * or split it, whichever is cleaner.
1012 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1018 /* might need to allocate a netdev_priv'ed register array eventually
1019 * to be able to record state changes, but for now
1020 * some fully hardcoded register handling ought to be ok I guess. */
1022 if (dir == mdi_read) {
1025 /* Auto-negotiation, right? */
1026 return BMCR_ANENABLE |
1029 return BMSR_LSTATUS /* for mii_link_ok() */ |
1033 /* 80c24 is a "combo card" PHY, right? */
1034 return ADVERTISE_10HALF |
1037 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1038 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1039 dir == mdi_read ? "READ" : "WRITE",
1046 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1047 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1048 dir == mdi_read ? "READ" : "WRITE",
1054 static inline int e100_phy_supports_mii(struct nic *nic)
1056 /* for now, just check it by comparing whether we
1057 are using MII software emulation.
1059 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1062 static void e100_get_defaults(struct nic *nic)
1064 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
1065 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
1067 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
1068 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
1069 if (nic->mac == mac_unknown)
1070 nic->mac = mac_82557_D100_A;
1072 nic->params.rfds = rfds;
1073 nic->params.cbs = cbs;
1075 /* Quadwords to DMA into FIFO before starting frame transmit */
1076 nic->tx_threshold = 0xE0;
1078 /* no interrupt for every tx completion, delay = 256us if not 557 */
1079 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
1080 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1082 /* Template for a freshly allocated RFD */
1083 nic->blank_rfd.command = 0;
1084 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
1085 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
1088 nic->mii.phy_id_mask = 0x1F;
1089 nic->mii.reg_num_mask = 0x1F;
1090 nic->mii.dev = nic->netdev;
1091 nic->mii.mdio_read = mdio_read;
1092 nic->mii.mdio_write = mdio_write;
1095 static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1097 struct config *config = &cb->u.config;
1098 u8 *c = (u8 *)config;
1099 struct net_device *netdev = nic->netdev;
1101 cb->command = cpu_to_le16(cb_config);
1103 memset(config, 0, sizeof(struct config));
1105 config->byte_count = 0x16; /* bytes in this struct */
1106 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
1107 config->direct_rx_dma = 0x1; /* reserved */
1108 config->standard_tcb = 0x1; /* 1=standard, 0=extended */
1109 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
1110 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
1111 config->tx_underrun_retry = 0x3; /* # of underrun retries */
1112 if (e100_phy_supports_mii(nic))
1113 config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
1114 config->pad10 = 0x6;
1115 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
1116 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
1117 config->ifs = 0x6; /* x16 = inter frame spacing */
1118 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
1119 config->pad15_1 = 0x1;
1120 config->pad15_2 = 0x1;
1121 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
1122 config->fc_delay_hi = 0x40; /* time delay for fc frame */
1123 config->tx_padding = 0x1; /* 1=pad short frames */
1124 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
1125 config->pad18 = 0x1;
1126 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
1127 config->pad20_1 = 0x1F;
1128 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
1129 config->pad21_1 = 0x5;
1131 config->adaptive_ifs = nic->adaptive_ifs;
1132 config->loopback = nic->loopback;
1134 if (nic->mii.force_media && nic->mii.full_duplex)
1135 config->full_duplex_force = 0x1; /* 1=force, 0=auto */
1137 if (nic->flags & promiscuous || nic->loopback) {
1138 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1139 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1140 config->promiscuous_mode = 0x1; /* 1=on, 0=off */
1143 if (unlikely(netdev->features & NETIF_F_RXFCS))
1144 config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */
1146 if (nic->flags & multicast_all)
1147 config->multicast_all = 0x1; /* 1=accept, 0=no */
1149 /* disable WoL when up */
1150 if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
1151 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
1153 if (nic->mac >= mac_82558_D101_A4) {
1154 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
1155 config->mwi_enable = 0x1; /* 1=enable, 0=disable */
1156 config->standard_tcb = 0x0; /* 1=standard, 0=extended */
1157 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
1158 if (nic->mac >= mac_82559_D101M) {
1159 config->tno_intr = 0x1; /* TCO stats enable */
1160 /* Enable TCO in extended config */
1161 if (nic->mac >= mac_82551_10) {
1162 config->byte_count = 0x20; /* extended bytes */
1163 config->rx_d102_mode = 0x1; /* GMRC for TCO */
1166 config->standard_stat_counter = 0x0;
1170 if (netdev->features & NETIF_F_RXALL) {
1171 config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
1172 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1173 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1176 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
1178 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
1180 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
1185 /*************************************************************************
1186 * CPUSaver parameters
1188 * All CPUSaver parameters are 16-bit literals that are part of a
1189 * "move immediate value" instruction. By changing the value of
1190 * the literal in the instruction before the code is loaded, the
1191 * driver can change the algorithm.
1193 * INTDELAY - This loads the dead-man timer with its initial value.
1194 * When this timer expires the interrupt is asserted, and the
1195 * timer is reset each time a new packet is received. (see
1196 * BUNDLEMAX below to set the limit on number of chained packets)
1197 * The current default is 0x600 or 1536. Experiments show that
1198 * the value should probably stay within the 0x200 - 0x1000 range.
1201 * This sets the maximum number of frames that will be bundled. In
1202 * some situations, such as the TCP windowing algorithm, it may be
1203 * better to limit the growth of the bundle size than let it go as
1204 * high as it can, because that could cause too much added latency.
1205 * The default is six, because this is the number of packets in the
1206 * default TCP window size. A value of 1 would make CPUSaver indicate
1207 * an interrupt for every frame received. If you do not want to put
1208 * a limit on the bundle size, set this value to 0xFFFF.
1211 * This contains a bit-mask describing the minimum size frame that
1212 * will be bundled. The default masks the lower 7 bits, which means
1213 * that any frame less than 128 bytes in length will not be bundled,
1214 * but will instead immediately generate an interrupt. This does
1215 * not affect the current bundle in any way. Any frame that is 128
1216 * bytes or larger will be bundled normally. This feature is meant
1217 * to provide immediate indication of ACK frames in a TCP environment.
1218 * Customers were seeing poor performance when a machine with CPUSaver
1219 * enabled was sending but not receiving. The delay introduced when
1220 * the ACKs were received was enough to reduce total throughput, because
1221 * the sender would sit idle until the ACK was finally seen.
1223 * The current default is 0xFF80, which masks out the lower 7 bits.
1224 * This means that any frame which is 0x7F (127) bytes or smaller
1225 * will cause an immediate interrupt. Because this value must be a
1226 * bit mask, there are only a few valid values that can be used. To
1227 * turn this feature off, the driver can write the value 0xFFFF to the
1228 * lower word of this instruction (in the same way that the other
1229 * parameters are used). Likewise, a value of 0xF800 (2047) would
1230 * cause an interrupt to be generated for every frame, because all
1231 * standard Ethernet frames are <= 2047 bytes in length.
1232 *************************************************************************/
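/* Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): how one of the 16-bit CPUSaver literals described above is
 * patched into a microcode word.  The ucode image is stored little-endian,
 * so only the low 16 bits of the word are replaced; this mirrors what
 * e100_setup_ucode() does further below.
 */
static void __maybe_unused e100_patch_ucode_literal(__le32 *word, u16 literal)
{
	*word &= cpu_to_le32(0xFFFF0000);	/* keep the instruction bits */
	*word |= cpu_to_le32(literal);		/* insert the new literal    */
}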
1234 /* if you wish to disable the ucode functionality, while maintaining the
1235 * workarounds it provides, set BUNDLESMALL to 0, BUNDLEMAX to 1, and INTDELAY to 1. */
1240 #define BUNDLESMALL 1
1241 #define BUNDLEMAX (u16)6
1242 #define INTDELAY (u16)1536 /* 0x600 */
1244 /* Initialize firmware */
1245 static const struct firmware *e100_request_firmware(struct nic *nic)
1247 const char *fw_name;
1248 const struct firmware *fw = nic->fw;
1249 u8 timer, bundle, min_size;
1251 bool required = false;
1253 /* do not load u-code for ICH devices */
1254 if (nic->flags & ich)
1257 /* Search for ucode match against h/w revision
1259 * Based on comments in the source code for the FreeBSD fxp
1260 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
1262 * "fixes for bugs in the B-step hardware (specifically, bugs
1263 * with Inline Receive)."
1265 * So we must fail if it cannot be loaded.
1267 * The other microcode files are only required for the optional
1268 * CPUSaver feature. Nice to have, but no reason to fail.
1270 if (nic->mac == mac_82559_D101M) {
1271 fw_name = FIRMWARE_D101M;
1272 } else if (nic->mac == mac_82559_D101S) {
1273 fw_name = FIRMWARE_D101S;
1274 } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
1275 fw_name = FIRMWARE_D102E;
1277 } else { /* No ucode on other devices */
1281 /* If the firmware has not previously been loaded, request a pointer
1282 * to it. If it was previously loaded, we are reinitializing the
1283 * adapter, possibly in a resume from hibernate, in which case
1284 * reject_firmware() cannot be used.
1287 err = reject_firmware(&fw, fw_name, &nic->pdev->dev);
1291 netif_err(nic, probe, nic->netdev,
1292 "Failed to load firmware \"%s\": %d\n",
1294 netif_err(nic, probe, nic->netdev, "Proceeding without firmware\n");
1297 netif_info(nic, probe, nic->netdev,
1298 "CPUSaver disabled. Needs \"%s\": %d\n",
1304 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1305 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1306 if (fw->size != UCODE_SIZE * 4 + 3) {
1307 netif_err(nic, probe, nic->netdev,
1308 "Firmware \"%s\" has wrong size %zu\n",
1310 release_firmware(fw);
1311 return ERR_PTR(-EINVAL);
1314 /* Read timer, bundle and min_size from end of firmware blob */
1315 timer = fw->data[UCODE_SIZE * 4];
1316 bundle = fw->data[UCODE_SIZE * 4 + 1];
1317 min_size = fw->data[UCODE_SIZE * 4 + 2];
1319 if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
1320 min_size >= UCODE_SIZE) {
1321 netif_err(nic, probe, nic->netdev,
1322 "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
1323 fw_name, timer, bundle, min_size);
1324 release_firmware(fw);
1325 return ERR_PTR(-EINVAL);
1328 /* OK, firmware is validated and ready to use. Save a pointer
1329 * to it in the nic */
1334 static int e100_setup_ucode(struct nic *nic, struct cb *cb,
1335 struct sk_buff *skb)
1337 const struct firmware *fw = (void *)skb;
1338 u8 timer, bundle, min_size;
1340 /* It's not a real skb; we just abused the fact that e100_exec_cb
1341 will pass it through to here... */
1344 /* firmware is stored as little endian already */
1345 memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
1347 /* Read timer, bundle and min_size from end of firmware blob */
1348 timer = fw->data[UCODE_SIZE * 4];
1349 bundle = fw->data[UCODE_SIZE * 4 + 1];
1350 min_size = fw->data[UCODE_SIZE * 4 + 2];
1352 /* Insert user-tunable settings in cb->u.ucode */
1353 cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
1354 cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
1355 cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
1356 cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
1357 cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
1358 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
1360 cb->command = cpu_to_le16(cb_ucode | cb_el);
1364 static inline int e100_load_ucode_wait(struct nic *nic)
1366 const struct firmware *fw;
1367 int err = 0, counter = 50;
1368 struct cb *cb = nic->cb_to_clean;
1370 fw = e100_request_firmware(nic);
1371 /* If it's NULL, then no ucode is required */
1372 if (IS_ERR_OR_NULL(fw))
1373 return PTR_ERR_OR_ZERO(fw);
1375 if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
1376 netif_err(nic, probe, nic->netdev,
1377 "ucode cmd failed with error %d\n", err);
1379 /* must restart cuc */
1380 nic->cuc_cmd = cuc_start;
1382 /* wait for completion */
1383 e100_write_flush(nic);
1386 /* wait for possibly (ouch) 500ms */
1387 while (!(cb->status & cpu_to_le16(cb_complete))) {
1389 if (!--counter) break;
1392 /* ack any interrupts, something could have been set */
1393 iowrite8(~0, &nic->csr->scb.stat_ack);
1395 /* if the command failed, or is not OK, notify and return */
1396 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1397 netif_err(nic, probe, nic->netdev, "ucode load failed\n");
1404 static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1405 struct sk_buff *skb)
1407 cb->command = cpu_to_le16(cb_iaaddr);
1408 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1412 static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1414 cb->command = cpu_to_le16(cb_dump);
1415 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1416 offsetof(struct mem, dump_buf));
1420 static int e100_phy_check_without_mii(struct nic *nic)
1425 phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
1428 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
1429 case I82503: /* Non-MII PHY; UNTESTED! */
1430 case S80C24: /* Non-MII PHY; tested and working */
1431 /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
1432 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
1433 * doesn't have a programming interface of any sort. The
1434 * media is sensed automatically based on how the link partner
1435 * is configured. This is, in essence, manual configuration.
1437 netif_info(nic, probe, nic->netdev,
1438 "found MII-less i82503 or 80c24 or other PHY\n");
1440 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1441 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
1443 /* these might be needed for certain MII-less cards...
1444 * nic->flags |= ich;
1445 * nic->flags |= ich_10h_workaround; */
1456 #define NCONFIG_AUTO_SWITCH 0x0080
1457 #define MII_NSC_CONG MII_RESV1
1458 #define NSC_CONG_ENABLE 0x0100
1459 #define NSC_CONG_TXREADY 0x0400
1460 #define ADVERTISE_FC_SUPPORTED 0x0400
1461 static int e100_phy_init(struct nic *nic)
1463 struct net_device *netdev = nic->netdev;
1465 u16 bmcr, stat, id_lo, id_hi, cong;
1467 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
1468 for (addr = 0; addr < 32; addr++) {
1469 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1470 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1471 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1472 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1473 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1477 /* uhoh, no PHY detected: check whether we seem to be some
1478 * weird, rare variant which is *known* to not have any MII.
1479 * But do this only AFTER the MII check, since it relies on
1480 * EEPROM values which may easily be unreliable. */
1481 if (e100_phy_check_without_mii(nic))
1482 return 0; /* simply return and hope for the best */
1484 /* for unknown cases log a fatal error */
1485 netif_err(nic, hw, nic->netdev,
1486 "Failed to locate any known PHY, aborting\n");
1490 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1491 "phy_addr = %d\n", nic->mii.phy_id);
1494 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1495 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1496 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1497 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1498 "phy ID = 0x%08X\n", nic->phy);
1500 /* Select the phy and isolate the rest */
1501 for (addr = 0; addr < 32; addr++) {
1502 if (addr != nic->mii.phy_id) {
1503 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1504 } else if (nic->phy != phy_82552_v) {
1505 bmcr = mdio_read(netdev, addr, MII_BMCR);
1506 mdio_write(netdev, addr, MII_BMCR,
1507 bmcr & ~BMCR_ISOLATE);
1511 * Workaround for 82552:
1512 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
1513 * other phy_id's) using bmcr value from addr discovery loop above.
1515 if (nic->phy == phy_82552_v)
1516 mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
1517 bmcr & ~BMCR_ISOLATE);
1519 /* Handle National tx phys */
1520 #define NCS_PHY_MODEL_MASK 0xFFF0FFFF
1521 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1522 /* Disable congestion control */
1523 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1524 cong |= NSC_CONG_TXREADY;
1525 cong &= ~NSC_CONG_ENABLE;
1526 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1529 if (nic->phy == phy_82552_v) {
1530 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
1532 /* assign special tweaked mdio_ctrl() function */
1533 nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
1535 /* Work around silicon not advertising flow control during autoneg */
1536 advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1537 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
1539 /* Reset for the above changes to take effect */
1540 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1542 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
1543 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1544 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
1545 (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
1546 /* enable/disable MDI/MDI-X auto-switching. */
1547 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1548 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
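/* Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the probe order used at the top of e100_phy_init() above.
 * Loop index 0 maps to PHY address 1, index 1 maps to address 0, and every
 * other index maps to itself, giving the search order {1, 0, 2, 3, ..., 31}.
 */
static int __maybe_unused e100_phy_probe_addr(int index)
{
	return (index == 0) ? 1 : (index == 1) ? 0 : index;
}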
1554 static int e100_hw_init(struct nic *nic)
1560 netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
1561 if (!in_interrupt() && (err = e100_self_test(nic)))
1564 if ((err = e100_phy_init(nic)))
1566 if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1568 if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1570 if ((err = e100_load_ucode_wait(nic)))
1572 if ((err = e100_exec_cb(nic, NULL, e100_configure)))
1574 if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1576 if ((err = e100_exec_cmd(nic, cuc_dump_addr,
1577 nic->dma_addr + offsetof(struct mem, stats))))
1579 if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1582 e100_disable_irq(nic);
1587 static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1589 struct net_device *netdev = nic->netdev;
1590 struct netdev_hw_addr *ha;
1591 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1593 cb->command = cpu_to_le16(cb_multi);
1594 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1596 netdev_for_each_mc_addr(ha, netdev) {
1599 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1605 static void e100_set_multicast_list(struct net_device *netdev)
1607 struct nic *nic = netdev_priv(netdev);
1609 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1610 "mc_count=%d, flags=0x%04X\n",
1611 netdev_mc_count(netdev), netdev->flags);
1613 if (netdev->flags & IFF_PROMISC)
1614 nic->flags |= promiscuous;
1616 nic->flags &= ~promiscuous;
1618 if (netdev->flags & IFF_ALLMULTI ||
1619 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1620 nic->flags |= multicast_all;
1622 nic->flags &= ~multicast_all;
1624 e100_exec_cb(nic, NULL, e100_configure);
1625 e100_exec_cb(nic, NULL, e100_multi);
1628 static void e100_update_stats(struct nic *nic)
1630 struct net_device *dev = nic->netdev;
1631 struct net_device_stats *ns = &dev->stats;
1632 struct stats *s = &nic->mem->stats;
1633 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1634 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
1637 /* Device's stats reporting may take several microseconds to
1638 * complete, so we're always waiting for results of the
1639 * previous command. */
1641 if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
1643 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1644 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1645 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1646 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1647 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1648 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1649 ns->collisions += nic->tx_collisions;
1650 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1651 le32_to_cpu(s->tx_lost_crs);
1652 nic->rx_short_frame_errors +=
1653 le32_to_cpu(s->rx_short_frame_errors);
1654 ns->rx_length_errors = nic->rx_short_frame_errors +
1655 nic->rx_over_length_errors;
1656 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1657 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1658 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1659 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1660 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1661 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1662 le32_to_cpu(s->rx_alignment_errors) +
1663 le32_to_cpu(s->rx_short_frame_errors) +
1664 le32_to_cpu(s->rx_cdt_errors);
1665 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1666 nic->tx_single_collisions +=
1667 le32_to_cpu(s->tx_single_collisions);
1668 nic->tx_multiple_collisions +=
1669 le32_to_cpu(s->tx_multiple_collisions);
1670 if (nic->mac >= mac_82558_D101_A4) {
1671 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1672 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1673 nic->rx_fc_unsupported +=
1674 le32_to_cpu(s->fc_rcv_unsupported);
1675 if (nic->mac >= mac_82559_D101M) {
1676 nic->tx_tco_frames +=
1677 le16_to_cpu(s->xmt_tco_frames);
1678 nic->rx_tco_frames +=
1679 le16_to_cpu(s->rcv_tco_frames);
1685 if (e100_exec_cmd(nic, cuc_dump_reset, 0))
1686 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1687 "exec cuc_dump_reset failed\n");
1690 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1692 /* Adjust inter-frame-spacing (IFS) between two transmits if
1693 * we're getting collisions on a half-duplex connection. */
1695 if (duplex == DUPLEX_HALF) {
1696 u32 prev = nic->adaptive_ifs;
1697 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1699 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1700 (nic->tx_frames > min_frames)) {
1701 if (nic->adaptive_ifs < 60)
1702 nic->adaptive_ifs += 5;
1703 } else if (nic->tx_frames < min_frames) {
1704 if (nic->adaptive_ifs >= 5)
1705 nic->adaptive_ifs -= 5;
1707 if (nic->adaptive_ifs != prev)
1708 e100_exec_cb(nic, NULL, e100_configure);
1712 static void e100_watchdog(unsigned long data)
1714 struct nic *nic = (struct nic *)data;
1715 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1718 netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1719 "right now = %ld\n", jiffies);
1721 /* mii library handles link maintenance tasks */
1723 mii_ethtool_gset(&nic->mii, &cmd);
1724 speed = ethtool_cmd_speed(&cmd);
1726 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1727 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
1728 speed == SPEED_100 ? 100 : 10,
1729 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1730 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1731 netdev_info(nic->netdev, "NIC Link is Down\n");
1734 mii_check_link(&nic->mii);
1736 /* Software generated interrupt to recover from (rare) Rx
1737 * allocation failure.
1738 * Unfortunately we have to use a spinlock to avoid re-enabling interrupts
1739 * accidentally, because the hardware shares a register between the
1740 * interrupt mask bit and the SW Interrupt generation bit */
1741 spin_lock_irq(&nic->cmd_lock);
1742 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1743 e100_write_flush(nic);
1744 spin_unlock_irq(&nic->cmd_lock);
1746 e100_update_stats(nic);
1747 e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
1749 if (nic->mac <= mac_82557_D100_C)
1750 /* Issue a multicast command to workaround a 557 lock up */
1751 e100_set_multicast_list(nic->netdev);
1753 if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
1754 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1755 nic->flags |= ich_10h_workaround;
1757 nic->flags &= ~ich_10h_workaround;
1759 mod_timer(&nic->watchdog,
1760 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1763 static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1764 struct sk_buff *skb)
1766 dma_addr_t dma_addr;
1767 cb->command = nic->tx_command;
1769 dma_addr = pci_map_single(nic->pdev,
1770 skb->data, skb->len, PCI_DMA_TODEVICE);
1771 /* If we can't map the skb, have the upper layer try later */
1772 if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
1773 dev_kfree_skb_any(skb);
1779 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
1780 * testing, i.e. sending frames with a bad CRC.
1782 if (unlikely(skb->no_fcs))
1783 cb->command |= cpu_to_le16(cb_tx_nc);
1785 cb->command &= ~cpu_to_le16(cb_tx_nc);
1787 /* interrupt every 16 packets regardless of delay; the test below is true when cbs_avail is a multiple of 16 */
1788 if ((nic->cbs_avail & ~15) == nic->cbs_avail)
1789 cb->command |= cpu_to_le16(cb_i);
1790 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1791 cb->u.tcb.tcb_byte_count = 0;
1792 cb->u.tcb.threshold = nic->tx_threshold;
1793 cb->u.tcb.tbd_count = 1;
1794 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
1795 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1796 skb_tx_timestamp(skb);
1800 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1801 struct net_device *netdev)
1803 struct nic *nic = netdev_priv(netdev);
1806 if (nic->flags & ich_10h_workaround) {
1807 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1808 Issue a NOP command followed by a 1us delay before
1809 issuing the Tx command. */
1810 if (e100_exec_cmd(nic, cuc_nop, 0))
1811 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1812 "exec cuc_nop failed\n");
1816 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1820 /* We queued the skb, but now we're out of space. */
1821 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1822 "No space for CB\n");
1823 netif_stop_queue(netdev);
1826 /* This is a hard error - log it. */
1827 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1828 "Out of Tx resources, returning skb\n");
1829 netif_stop_queue(netdev);
1830 return NETDEV_TX_BUSY;
1833 return NETDEV_TX_OK;
1836 static int e100_tx_clean(struct nic *nic)
1838 struct net_device *dev = nic->netdev;
1842 spin_lock(&nic->cb_lock);
1844 /* Clean CBs marked complete */
1845 for (cb = nic->cb_to_clean;
1846 cb->status & cpu_to_le16(cb_complete);
1847 cb = nic->cb_to_clean = cb->next) {
1848 dma_rmb(); /* read skb after status */
1849 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
1850 "cb[%d]->status = 0x%04X\n",
1851 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1854 if (likely(cb->skb != NULL)) {
1855 dev->stats.tx_packets++;
1856 dev->stats.tx_bytes += cb->skb->len;
1858 pci_unmap_single(nic->pdev,
1859 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1860 le16_to_cpu(cb->u.tcb.tbd.size),
1862 dev_kfree_skb_any(cb->skb);
1870 spin_unlock(&nic->cb_lock);
1872 /* Recover from running out of Tx resources in xmit_frame */
1873 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1874 netif_wake_queue(nic->netdev);
1879 static void e100_clean_cbs(struct nic *nic)
1882 while (nic->cbs_avail != nic->params.cbs.count) {
1883 struct cb *cb = nic->cb_to_clean;
1885 pci_unmap_single(nic->pdev,
1886 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1887 le16_to_cpu(cb->u.tcb.tbd.size),
1889 dev_kfree_skb(cb->skb);
1891 nic->cb_to_clean = nic->cb_to_clean->next;
1894 pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1898 nic->cuc_cmd = cuc_start;
1899 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1903 static int e100_alloc_cbs(struct nic *nic)
1906 unsigned int i, count = nic->params.cbs.count;
1908 nic->cuc_cmd = cuc_start;
1909 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1912 nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
1913 &nic->cbs_dma_addr);
1916 memset(nic->cbs, 0, count * sizeof(struct cb));
1918 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1919 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1920 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1922 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1923 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1924 ((i+1) % count) * sizeof(struct cb));
1927 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1928 nic->cbs_avail = count;
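/* Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the bus address that e100_alloc_cbs() above writes into each
 * CB's link field.  The modulo wraps the last CB back to CB 0, which is what
 * closes the ring the CU walks in hardware.
 */
static __le32 __maybe_unused e100_cb_link_sketch(dma_addr_t base,
						 unsigned int i,
						 unsigned int count)
{
	return cpu_to_le32(base + ((i + 1) % count) * sizeof(struct cb));
}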
1933 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1935 if (!nic->rxs) return;
1936 if (RU_SUSPENDED != nic->ru_running) return;
1938 /* handle init time starts */
1939 if (!rx) rx = nic->rxs;
1941 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1943 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1944 nic->ru_running = RU_RUNNING;
1948 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
1949 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1951 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1954 /* Init, and map the RFD. */
1955 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1956 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1957 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1959 if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1960 dev_kfree_skb_any(rx->skb);
1966 /* Link the RFD to the end of the RFA by linking the previous RFD to
1967 * this one. It is safe to touch the previous RFD because
1968 * it is protected by the el bit being set on the before-last buffer */
1969 if (rx->prev->skb) {
1970 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1971 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1972 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1973 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1979 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1980 unsigned int *work_done, unsigned int work_to_do)
1982 struct net_device *dev = nic->netdev;
1983 struct sk_buff *skb = rx->skb;
1984 struct rfd *rfd = (struct rfd *)skb->data;
1985 u16 rfd_status, actual_size;
1988 if (unlikely(work_done && *work_done >= work_to_do))
1991 /* Need to sync before taking a peek at cb_complete bit */
1992 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1993 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1994 rfd_status = le16_to_cpu(rfd->status);
1996 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1997 "status=0x%04X\n", rfd_status);
1998 dma_rmb(); /* read size after status bit */
2000 /* If data isn't ready, nothing to indicate */
2001 if (unlikely(!(rfd_status & cb_complete))) {
2002 /* If the next buffer has the el bit, but we think the receiver
2003 * is still running, check to see if it really stopped while
2004 * we had interrupts off.
2005 * This allows for a fast restart without re-enabling interrupts. */
2007 if ((le16_to_cpu(rfd->command) & cb_el) &&
2008 (RU_RUNNING == nic->ru_running))
2010 if (ioread8(&nic->csr->scb.status) & rus_no_res)
2011 nic->ru_running = RU_SUSPENDED;
2012 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2014 PCI_DMA_FROMDEVICE);
2018 /* Get actual data size */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
2022 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
2023 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
2026 pci_unmap_single(nic->pdev, rx->dma_addr,
2027 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2029 /* If this buffer has the el bit, but we think the receiver
2030 * is still running, check to see if it really stopped while
2031 * we had interrupts off.
2032 * This allows for a fast restart without re-enabling interrupts.
2033 * This can happen when the RU sees the size change but also sees
2034 * the el bit set. */
2035 if ((le16_to_cpu(rfd->command) & cb_el) &&
2036 (RU_RUNNING == nic->ru_running)) {
2038 if (ioread8(&nic->csr->scb.status) & rus_no_res)
2039 nic->ru_running = RU_SUSPENDED;
2042 /* Pull off the RFD and put the actual data (minus eth hdr) */
2043 skb_reserve(skb, sizeof(struct rfd));
2044 skb_put(skb, actual_size);
2045 skb->protocol = eth_type_trans(skb, nic->netdev);
2047 /* If we are receiving all frames, then don't bother
	 * checking for errors. */
2050 if (unlikely(dev->features & NETIF_F_RXALL)) {
2051 if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
2052 /* Received oversized frame, but keep it. */
2053 nic->rx_over_length_errors++;
2057 if (unlikely(!(rfd_status & cb_ok))) {
2058 /* Don't indicate if hardware indicates errors */
2059 dev_kfree_skb_any(skb);
2060 } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
2061 /* Don't indicate oversized frames */
2062 nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
	}
2078 static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
2079 unsigned int work_to_do)
2082 int restart_required = 0, err = 0;
2083 struct rx *old_before_last_rx, *new_before_last_rx;
2084 struct rfd *old_before_last_rfd, *new_before_last_rfd;
2086 /* Indicate newly arrived packets */
2087 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
2088 err = e100_rx_indicate(nic, rx, work_done, work_to_do);
2089 /* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	/* On EAGAIN we hit the quota, so there is more work to do; restart
	 * once cleanup is complete.
	 * Otherwise, if we are already in the RNR state, pay attention: this
	 * ensures the state machine never restarts the RU with a partially
	 * cleaned list, avoiding a race between hardware and rx_to_clean when
	 * in NAPI mode */
2101 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
2102 restart_required = 1;
2104 old_before_last_rx = nic->rx_to_use->prev->prev;
2105 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
2107 /* Alloc new skbs to refill list */
2108 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
2109 if (unlikely(e100_rx_alloc_skb(nic, rx)))
2110 break; /* Better luck next time (see watchdog) */
2113 new_before_last_rx = nic->rx_to_use->prev->prev;
2114 if (new_before_last_rx != old_before_last_rx) {
2115 /* Set the el-bit on the buffer that is before the last buffer.
2116 * This lets us update the next pointer on the last buffer
2117 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
2124 new_before_last_rfd =
2125 (struct rfd *)new_before_last_rx->skb->data;
2126 new_before_last_rfd->size = 0;
2127 new_before_last_rfd->command |= cpu_to_le16(cb_el);
2128 pci_dma_sync_single_for_device(nic->pdev,
2129 new_before_last_rx->dma_addr, sizeof(struct rfd),
2130 PCI_DMA_BIDIRECTIONAL);
2132 /* Now that we have a new stopping point, we can clear the old
2133 * stopping point. We must sync twice to get the proper
2134 * ordering on the hardware side of things. */
2135 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
2136 pci_dma_sync_single_for_device(nic->pdev,
2137 old_before_last_rx->dma_addr, sizeof(struct rfd),
2138 PCI_DMA_BIDIRECTIONAL);
	old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
						+ ETH_FCS_LEN);
2141 pci_dma_sync_single_for_device(nic->pdev,
2142 old_before_last_rx->dma_addr, sizeof(struct rfd),
2143 PCI_DMA_BIDIRECTIONAL);
2146 if (restart_required) {
2148 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
2149 e100_start_receiver(nic, nic->rx_to_clean);
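		/* Added note: the RNR condition is acknowledged in the SCB
		 * before the RU is restarted, presumably so no stale RNR
		 * indication is left pending once the receiver is running
		 * again from the next RFD to clean. */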
2155 static void e100_rx_clean_list(struct nic *nic)
2158 unsigned int i, count = nic->params.rfds.count;
2160 nic->ru_running = RU_UNINITIALIZED;
2163 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2165 pci_unmap_single(nic->pdev, rx->dma_addr,
2166 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2167 dev_kfree_skb(rx->skb);
2174 nic->rx_to_use = nic->rx_to_clean = NULL;
2177 static int e100_rx_alloc_list(struct nic *nic)
2180 unsigned int i, count = nic->params.rfds.count;
2181 struct rfd *before_last;
2183 nic->rx_to_use = nic->rx_to_clean = NULL;
2184 nic->ru_running = RU_UNINITIALIZED;
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;
2189 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2190 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2191 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
2192 if (e100_rx_alloc_skb(nic, rx)) {
2193 e100_rx_clean_list(nic);
2197 /* Set the el-bit on the buffer that is before the last buffer.
2198 * This lets us update the next pointer on the last buffer without
2199 * worrying about hardware touching it.
2200 * We set the size to 0 to prevent hardware from touching this buffer.
2201 * When the hardware hits the before last buffer with el-bit and size
2202 * of 0, it will RNR interrupt, the RU will go into the No Resources
2203 * state. It will not complete nor write to this buffer. */
2204 rx = nic->rxs->prev->prev;
2205 before_last = (struct rfd *)rx->skb->data;
2206 before_last->command |= cpu_to_le16(cb_el);
2207 before_last->size = 0;
2208 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2209 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2211 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2212 nic->ru_running = RU_SUSPENDED;
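	/* Added note: the list is ready but the receive unit is deliberately
	 * left in RU_SUSPENDED here; e100_start_receiver() is what later
	 * issues ruc_start with the first RFD's bus address. */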
2217 static irqreturn_t e100_intr(int irq, void *dev_id)
2219 struct net_device *netdev = dev_id;
2220 struct nic *nic = netdev_priv(netdev);
2221 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2223 netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
2224 "stat_ack = 0x%02X\n", stat_ack);
2226 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
	    stat_ack == stat_ack_not_present) /* Hardware is ejected */
		return IRQ_NONE;
2230 /* Ack interrupt(s) */
2231 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
2233 /* We hit Receive No Resource (RNR); restart RU after cleaning */
2234 if (stat_ack & stat_ack_rnr)
2235 nic->ru_running = RU_SUSPENDED;
2237 if (likely(napi_schedule_prep(&nic->napi))) {
2238 e100_disable_irq(nic);
2239 __napi_schedule(&nic->napi);
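	/* Added note: the usual NAPI handoff.  Device interrupts are masked
	 * here and re-enabled only from e100_poll() once the budgeted RX
	 * work is done, keeping the hard interrupt handler short. */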
2245 static int e100_poll(struct napi_struct *napi, int budget)
2247 struct nic *nic = container_of(napi, struct nic, napi);
2248 unsigned int work_done = 0;
2250 e100_rx_clean(nic, &work_done, budget);
2253 /* If budget not fully consumed, exit the polling mode */
2254 if (work_done < budget) {
2255 napi_complete(napi);
2256 e100_enable_irq(nic);
2262 #ifdef CONFIG_NET_POLL_CONTROLLER
2263 static void e100_netpoll(struct net_device *netdev)
2265 struct nic *nic = netdev_priv(netdev);
2267 e100_disable_irq(nic);
2268 e100_intr(nic->pdev->irq, netdev);
2270 e100_enable_irq(nic);
2274 static int e100_set_mac_address(struct net_device *netdev, void *p)
2276 struct nic *nic = netdev_priv(netdev);
2277 struct sockaddr *addr = p;
2279 if (!is_valid_ether_addr(addr->sa_data))
2280 return -EADDRNOTAVAIL;
2282 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2283 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2288 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
2296 static int e100_asf(struct nic *nic)
2298 /* ASF can be enabled from eeprom */
2299 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2300 (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
2301 !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
2302 ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
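/* Added note: ASF (Alert Standard Format) management firmware shares the NIC,
 * so when it is active the driver keeps wake support enabled on shutdown (see
 * __e100_shutdown).  The heuristic above keys off the 0x1050-0x1057 device-ID
 * range plus EEPROM bits (ASF enabled, GCL disabled, SMBus address not the
 * unprogrammed 0xFE value); treat the exact bit meanings as EEPROM-layout
 * details rather than documented behaviour. */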
2305 static int e100_up(struct nic *nic)
	if ((err = e100_rx_alloc_list(nic)))
		return err;
2311 if ((err = e100_alloc_cbs(nic)))
2312 goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
2315 e100_set_multicast_list(nic->netdev);
2316 e100_start_receiver(nic, NULL);
2317 mod_timer(&nic->watchdog, jiffies);
2318 if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
			       nic->netdev->name, nic->netdev)))
		goto err_no_irq;
2321 netif_wake_queue(nic->netdev);
2322 napi_enable(&nic->napi);
2323 /* enable ints _after_ enabling poll, preventing a race between
2324 * disable ints+schedule */
2325 e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
2337 static void e100_down(struct nic *nic)
2339 /* wait here for poll to complete */
2340 napi_disable(&nic->napi);
2341 netif_stop_queue(nic->netdev);
2343 free_irq(nic->pdev->irq, nic->netdev);
2344 del_timer_sync(&nic->watchdog);
2345 netif_carrier_off(nic->netdev);
2346 e100_clean_cbs(nic);
2347 e100_rx_clean_list(nic);
2350 static void e100_tx_timeout(struct net_device *netdev)
2352 struct nic *nic = netdev_priv(netdev);
2354 /* Reset outside of interrupt context, to avoid request_irq
2355 * in interrupt context */
2356 schedule_work(&nic->tx_timeout_task);
2359 static void e100_tx_timeout_task(struct work_struct *work)
2361 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2362 struct net_device *netdev = nic->netdev;
2364 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2365 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2368 if (netif_running(netdev)) {
2369 e100_down(netdev_priv(netdev));
2370 e100_up(netdev_priv(netdev));
2375 static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2378 struct sk_buff *skb;
2380 /* Use driver resources to perform internal MAC or PHY
2381 * loopback test. A single packet is prepared and transmitted
2382 * in loopback mode, and the test passes if the received
2383 * packet compares byte-for-byte to the transmitted packet. */
	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;
2390 /* ICH PHY loopback is broken so do MAC loopback instead */
2391 if (nic->flags & ich && loopback_mode == lb_phy)
2392 loopback_mode = lb_mac;
2394 nic->loopback = loopback_mode;
2395 if ((err = e100_hw_init(nic)))
2396 goto err_loopback_none;
2398 if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			   BMCR_LOOPBACK);
2402 e100_start_receiver(nic, NULL);
2404 if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2406 goto err_loopback_none;
2408 skb_put(skb, ETH_DATA_LEN);
2409 memset(skb->data, 0xFF, ETH_DATA_LEN);
2410 e100_xmit_frame(skb, nic->netdev);
2414 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2415 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
		   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2423 nic->loopback = lb_none;
2424 e100_clean_cbs(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
2431 #define MII_LED_CONTROL 0x1B
2432 #define E100_82552_LED_OVERRIDE 0x19
#define E100_82552_LED_ON	0x000F /* LED_TX and LED_RX both on */
2434 #define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
2436 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2438 struct nic *nic = netdev_priv(netdev);
2439 return mii_ethtool_gset(&nic->mii, cmd);
2442 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2444 struct nic *nic = netdev_priv(netdev);
2447 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2448 err = mii_ethtool_sset(&nic->mii, cmd);
2449 e100_exec_cb(nic, NULL, e100_configure);
2454 static void e100_get_drvinfo(struct net_device *netdev,
2455 struct ethtool_drvinfo *info)
2457 struct nic *nic = netdev_priv(netdev);
2458 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2459 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2460 strlcpy(info->bus_info, pci_name(nic->pdev),
2461 sizeof(info->bus_info));
2464 #define E100_PHY_REGS 0x1D
2465 static int e100_get_regs_len(struct net_device *netdev)
2467 struct nic *nic = netdev_priv(netdev);
2469 /* We know the number of registers, and the size of the dump buffer.
	 * Calculate the total size in bytes.
	 */
2472 return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
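/* Worked example (added): with E100_PHY_REGS = 0x1D there are 29 PHY
 * registers, so the MDI/SCB portion of the dump is (1 + 29) * sizeof(u32) =
 * 120 bytes, one u32 for the SCB status/command snapshot plus one per PHY
 * register, followed by the controller's dump_buf area that e100_get_regs()
 * appends. */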
2475 static void e100_get_regs(struct net_device *netdev,
2476 struct ethtool_regs *regs, void *p)
2478 struct nic *nic = netdev_priv(netdev);
2482 regs->version = (1 << 24) | nic->pdev->revision;
2483 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2484 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2485 ioread16(&nic->csr->scb.status);
2486 for (i = 0; i < E100_PHY_REGS; i++)
		/* Note that we read the registers in reverse order.  This
		 * ordering is the ABI apparently used by ethtool and other
		 * applications.
		 */
		buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
					E100_PHY_REGS - 1 - i);
2493 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2494 e100_exec_cb(nic, NULL, e100_dump);
2496 memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
2497 sizeof(nic->mem->dump_buf));
2500 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2502 struct nic *nic = netdev_priv(netdev);
2503 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2504 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2507 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2509 struct nic *nic = netdev_priv(netdev);
	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;
2520 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2522 e100_exec_cb(nic, NULL, e100_configure);
2527 static u32 e100_get_msglevel(struct net_device *netdev)
2529 struct nic *nic = netdev_priv(netdev);
2530 return nic->msg_enable;
2533 static void e100_set_msglevel(struct net_device *netdev, u32 value)
2535 struct nic *nic = netdev_priv(netdev);
2536 nic->msg_enable = value;
2539 static int e100_nway_reset(struct net_device *netdev)
2541 struct nic *nic = netdev_priv(netdev);
2542 return mii_nway_restart(&nic->mii);
2545 static u32 e100_get_link(struct net_device *netdev)
2547 struct nic *nic = netdev_priv(netdev);
2548 return mii_link_ok(&nic->mii);
2551 static int e100_get_eeprom_len(struct net_device *netdev)
2553 struct nic *nic = netdev_priv(netdev);
2554 return nic->eeprom_wc << 1;
2557 #define E100_EEPROM_MAGIC 0x1234
2558 static int e100_get_eeprom(struct net_device *netdev,
2559 struct ethtool_eeprom *eeprom, u8 *bytes)
2561 struct nic *nic = netdev_priv(netdev);
2563 eeprom->magic = E100_EEPROM_MAGIC;
2564 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2569 static int e100_set_eeprom(struct net_device *netdev,
2570 struct ethtool_eeprom *eeprom, u8 *bytes)
2572 struct nic *nic = netdev_priv(netdev);
2574 if (eeprom->magic != E100_EEPROM_MAGIC)
2577 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2579 return e100_eeprom_save(nic, eeprom->offset >> 1,
2580 (eeprom->len >> 1) + 1);
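	/* Added note: the EEPROM is word (16-bit) addressed while the ethtool
	 * request is in bytes, so offset >> 1 selects the starting word and
	 * (len >> 1) + 1 rounds up so a byte range that straddles a word
	 * boundary is still written back in full. */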
2583 static void e100_get_ringparam(struct net_device *netdev,
2584 struct ethtool_ringparam *ring)
2586 struct nic *nic = netdev_priv(netdev);
2587 struct param_range *rfds = &nic->params.rfds;
2588 struct param_range *cbs = &nic->params.cbs;
2590 ring->rx_max_pending = rfds->max;
2591 ring->tx_max_pending = cbs->max;
2592 ring->rx_pending = rfds->count;
2593 ring->tx_pending = cbs->count;
2596 static int e100_set_ringparam(struct net_device *netdev,
2597 struct ethtool_ringparam *ring)
2599 struct nic *nic = netdev_priv(netdev);
2600 struct param_range *rfds = &nic->params.rfds;
2601 struct param_range *cbs = &nic->params.cbs;
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
2608 rfds->count = max(ring->rx_pending, rfds->min);
2609 rfds->count = min(rfds->count, rfds->max);
2610 cbs->count = max(ring->tx_pending, cbs->min);
2611 cbs->count = min(cbs->count, cbs->max);
2612 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2613 rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
2620 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2621 "Link test (on/offline)",
2622 "Eeprom test (on/offline)",
2623 "Self test (offline)",
2624 "Mac loopback (offline)",
2625 "Phy loopback (offline)",
2627 #define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
2629 static void e100_diag_test(struct net_device *netdev,
2630 struct ethtool_test *test, u64 *data)
2632 struct ethtool_cmd cmd;
2633 struct nic *nic = netdev_priv(netdev);
2636 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2637 data[0] = !mii_link_ok(&nic->mii);
2638 data[1] = e100_eeprom_load(nic);
2639 if (test->flags & ETH_TEST_FL_OFFLINE) {
2641 /* save speed, duplex & autoneg settings */
2642 err = mii_ethtool_gset(&nic->mii, &cmd);
		if (netif_running(netdev))
			e100_down(nic);
2646 data[2] = e100_self_test(nic);
2647 data[3] = e100_loopback_test(nic, lb_mac);
2648 data[4] = e100_loopback_test(nic, lb_phy);
2650 /* restore speed, duplex & autoneg settings */
2651 err = mii_ethtool_sset(&nic->mii, &cmd);
		if (netif_running(netdev))
			e100_up(nic);
	}
2656 for (i = 0; i < E100_TEST_LEN; i++)
2657 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2659 msleep_interruptible(4 * 1000);
2662 static int e100_set_phys_id(struct net_device *netdev,
2663 enum ethtool_phys_id_state state)
2665 struct nic *nic = netdev_priv(netdev);
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		      MII_LED_CONTROL;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;
2685 case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;
2689 case ETHTOOL_ID_INACTIVE:
2693 mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
2697 static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2698 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2699 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2700 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2701 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2702 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2703 "tx_heartbeat_errors", "tx_window_errors",
2704 /* device-specific stats */
2705 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2706 "tx_flow_control_pause", "rx_flow_control_pause",
2707 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2708 "rx_short_frame_errors", "rx_over_length_errors",
2710 #define E100_NET_STATS_LEN 21
2711 #define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
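/* Added note: the first E100_NET_STATS_LEN (21) strings correspond to the
 * generic fields copied straight out of netdev->stats by
 * e100_get_ethtool_stats(); the remaining entries are the driver-specific
 * counters appended after them, in the same order as the string table. */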
2713 static int e100_get_sset_count(struct net_device *netdev, int sset)
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
2725 static void e100_get_ethtool_stats(struct net_device *netdev,
2726 struct ethtool_stats *stats, u64 *data)
2728 struct nic *nic = netdev_priv(netdev);
2731 for (i = 0; i < E100_NET_STATS_LEN; i++)
2732 data[i] = ((unsigned long *)&netdev->stats)[i];
2734 data[i++] = nic->tx_deferred;
2735 data[i++] = nic->tx_single_collisions;
2736 data[i++] = nic->tx_multiple_collisions;
2737 data[i++] = nic->tx_fc_pause;
2738 data[i++] = nic->rx_fc_pause;
2739 data[i++] = nic->rx_fc_unsupported;
2740 data[i++] = nic->tx_tco_frames;
2741 data[i++] = nic->rx_tco_frames;
2742 data[i++] = nic->rx_short_frame_errors;
2743 data[i++] = nic->rx_over_length_errors;
2746 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
2758 static const struct ethtool_ops e100_ethtool_ops = {
2759 .get_settings = e100_get_settings,
2760 .set_settings = e100_set_settings,
2761 .get_drvinfo = e100_get_drvinfo,
2762 .get_regs_len = e100_get_regs_len,
2763 .get_regs = e100_get_regs,
2764 .get_wol = e100_get_wol,
2765 .set_wol = e100_set_wol,
2766 .get_msglevel = e100_get_msglevel,
2767 .set_msglevel = e100_set_msglevel,
2768 .nway_reset = e100_nway_reset,
2769 .get_link = e100_get_link,
2770 .get_eeprom_len = e100_get_eeprom_len,
2771 .get_eeprom = e100_get_eeprom,
2772 .set_eeprom = e100_set_eeprom,
2773 .get_ringparam = e100_get_ringparam,
2774 .set_ringparam = e100_set_ringparam,
2775 .self_test = e100_diag_test,
2776 .get_strings = e100_get_strings,
2777 .set_phys_id = e100_set_phys_id,
2778 .get_ethtool_stats = e100_get_ethtool_stats,
2779 .get_sset_count = e100_get_sset_count,
2780 .get_ts_info = ethtool_op_get_ts_info,
2783 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2785 struct nic *nic = netdev_priv(netdev);
2787 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2790 static int e100_alloc(struct nic *nic)
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
					&nic->dma_addr);
2794 return nic->mem ? 0 : -ENOMEM;
2797 static void e100_free(struct nic *nic)
2800 pci_free_consistent(nic->pdev, sizeof(struct mem),
2801 nic->mem, nic->dma_addr);
2806 static int e100_open(struct net_device *netdev)
2808 struct nic *nic = netdev_priv(netdev);
2811 netif_carrier_off(netdev);
2812 if ((err = e100_up(nic)))
2813 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2817 static int e100_close(struct net_device *netdev)
2819 e100_down(netdev_priv(netdev));
2823 static int e100_set_features(struct net_device *netdev,
2824 netdev_features_t features)
2826 struct nic *nic = netdev_priv(netdev);
2827 netdev_features_t changed = features ^ netdev->features;
	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;
2832 netdev->features = features;
2833 e100_exec_cb(nic, NULL, e100_configure);
2837 static const struct net_device_ops e100_netdev_ops = {
2838 .ndo_open = e100_open,
2839 .ndo_stop = e100_close,
2840 .ndo_start_xmit = e100_xmit_frame,
2841 .ndo_validate_addr = eth_validate_addr,
2842 .ndo_set_rx_mode = e100_set_multicast_list,
2843 .ndo_set_mac_address = e100_set_mac_address,
2844 .ndo_change_mtu = e100_change_mtu,
2845 .ndo_do_ioctl = e100_do_ioctl,
2846 .ndo_tx_timeout = e100_tx_timeout,
2847 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e100_netpoll,
#endif
	.ndo_set_features = e100_set_features,
2853 static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2855 struct net_device *netdev;
	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;
2862 netdev->hw_features |= NETIF_F_RXFCS;
2863 netdev->priv_flags |= IFF_SUPP_NOFCS;
2864 netdev->hw_features |= NETIF_F_RXALL;
2866 netdev->netdev_ops = &e100_netdev_ops;
2867 netdev->ethtool_ops = &e100_ethtool_ops;
2868 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2869 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2871 nic = netdev_priv(netdev);
2872 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2873 nic->netdev = netdev;
2875 nic->msg_enable = (1 << debug) - 1;
2876 nic->mdio_ctrl = mdio_ctrl_hw;
2877 pci_set_drvdata(pdev, netdev);
2879 if ((err = pci_enable_device(pdev))) {
2880 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
2881 goto err_out_free_dev;
2884 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2885 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
2887 goto err_out_disable_pdev;
2890 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2891 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
2892 goto err_out_disable_pdev;
2895 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2896 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
2897 goto err_out_free_res;
2900 SET_NETDEV_DEV(netdev, &pdev->dev);
	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}
	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;
2917 e100_get_defaults(nic);
2919 /* D100 MAC doesn't allow rx of vlan packets with normal MTU */
2920 if (nic->mac < mac_82558_D101_A4)
2921 netdev->features |= NETIF_F_VLAN_CHALLENGED;
2923 /* locks must be initialized before calling hw_reset */
2924 spin_lock_init(&nic->cb_lock);
2925 spin_lock_init(&nic->cmd_lock);
2926 spin_lock_init(&nic->mdio_lock);
2928 /* Reset the device before pci_set_master() in case device is in some
2929 * funky state and has an interrupt pending - hint: we don't have the
2930 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);
2935 setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
2937 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2939 if ((err = e100_alloc(nic))) {
2940 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
2941 goto err_out_iounmap;
	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;
2949 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2950 if (!is_valid_ether_addr(netdev->dev_addr)) {
2951 if (!eeprom_bad_csum_allow) {
2952 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
2956 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
2960 /* Wol magic packet can be enabled from eeprom */
2961 if ((nic->mac >= mac_82558_D101_A4) &&
2962 (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
2963 nic->flags |= wol_magic;
2964 device_set_wakeup_enable(&pdev->dev, true);
2967 /* ack any pending wake events, disable PME */
2968 pci_pme_active(pdev, false);
2970 strcpy(netdev->name, "eth%d");
2971 if ((err = register_netdev(netdev))) {
2972 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
2980 if (!nic->cbs_pool) {
2981 netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
2985 netif_info(nic, probe, nic->netdev,
2986 "addr 0x%llx, irq %d, MAC addr %pM\n",
2987 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2988 pdev->irq, netdev->dev_addr);
err_out_pool:
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
3000 err_out_disable_pdev:
3001 pci_disable_device(pdev);
err_out_free_dev:
	free_netdev(netdev);
	return err;
3007 static void e100_remove(struct pci_dev *pdev)
3009 struct net_device *netdev = pci_get_drvdata(pdev);
3012 struct nic *nic = netdev_priv(netdev);
3013 unregister_netdev(netdev);
3015 pci_iounmap(pdev, nic->csr);
3016 pci_pool_destroy(nic->cbs_pool);
3017 free_netdev(netdev);
3018 pci_release_regions(pdev);
3019 pci_disable_device(pdev);
3023 #define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
3024 #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
3025 #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
3026 static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
3028 struct net_device *netdev = pci_get_drvdata(pdev);
3029 struct nic *nic = netdev_priv(netdev);
	if (netif_running(netdev))
		e100_down(nic);
3033 netif_device_detach(netdev);
3035 pci_save_state(pdev);
3037 if ((nic->flags & wol_magic) | e100_asf(nic)) {
3038 /* enable reverse auto-negotiation */
3039 if (nic->phy == phy_82552_v) {
3040 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3041 E100_82552_SMARTSPEED);
3043 mdio_write(netdev, nic->mii.phy_id,
3044 E100_82552_SMARTSPEED, smartspeed |
3045 E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
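			/* Added note: presumably this lets the 82552 PHY
			 * renegotiate the link on its own (possibly at a
			 * lower speed) while the host sleeps, so wake-up or
			 * ASF traffic can still arrive; the matching disable
			 * is in e100_resume(). */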
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}
3052 pci_clear_master(pdev);
3055 static int __e100_power_off(struct pci_dev *pdev, bool wake)
	if (wake)
		return pci_prepare_to_sleep(pdev);
3060 pci_wake_from_d3(pdev, false);
3061 pci_set_power_state(pdev, PCI_D3hot);
3067 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
	bool wake;

	__e100_shutdown(pdev, &wake);
3071 return __e100_power_off(pdev, wake);
3074 static int e100_resume(struct pci_dev *pdev)
3076 struct net_device *netdev = pci_get_drvdata(pdev);
3077 struct nic *nic = netdev_priv(netdev);
3079 pci_set_power_state(pdev, PCI_D0);
3080 pci_restore_state(pdev);
3081 /* ack any pending wake events, disable PME */
3082 pci_enable_wake(pdev, PCI_D0, 0);
3084 /* disable reverse auto-negotiation */
3085 if (nic->phy == phy_82552_v) {
3086 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3087 E100_82552_SMARTSPEED);
3089 mdio_write(netdev, nic->mii.phy_id,
3090 E100_82552_SMARTSPEED,
3091 smartspeed & ~(E100_82552_REV_ANEG));
3094 netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
3100 #endif /* CONFIG_PM */
3102 static void e100_shutdown(struct pci_dev *pdev)
	bool wake;

	__e100_shutdown(pdev, &wake);
3106 if (system_state == SYSTEM_POWER_OFF)
3107 __e100_power_off(pdev, wake);
3110 /* ------------------ PCI Error Recovery infrastructure -------------- */
3112 * e100_io_error_detected - called when PCI error is detected.
3113 * @pdev: Pointer to PCI device
3114 * @state: The current pci connection state
3116 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3118 struct net_device *netdev = pci_get_drvdata(pdev);
3119 struct nic *nic = netdev_priv(netdev);
3121 netif_device_detach(netdev);
3123 if (state == pci_channel_io_perm_failure)
3124 return PCI_ERS_RESULT_DISCONNECT;
	if (netif_running(netdev))
		e100_down(nic);
3128 pci_disable_device(pdev);
3130 /* Request a slot reset. */
3131 return PCI_ERS_RESULT_NEED_RESET;
3135 * e100_io_slot_reset - called after the pci bus has been reset.
3136 * @pdev: Pointer to PCI device
3138 * Restart the card from scratch.
3140 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
3142 struct net_device *netdev = pci_get_drvdata(pdev);
3143 struct nic *nic = netdev_priv(netdev);
3145 if (pci_enable_device(pdev)) {
3146 pr_err("Cannot re-enable PCI device after reset\n");
3147 return PCI_ERS_RESULT_DISCONNECT;
3149 pci_set_master(pdev);
3151 /* Only one device per card can do a reset */
3152 if (0 != PCI_FUNC(pdev->devfn))
3153 return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
3161 * e100_io_resume - resume normal operations
3162 * @pdev: Pointer to PCI device
3164 * Resume normal operations after an error recovery
3165 * sequence has been completed.
3167 static void e100_io_resume(struct pci_dev *pdev)
3169 struct net_device *netdev = pci_get_drvdata(pdev);
3170 struct nic *nic = netdev_priv(netdev);
3172 /* ack any pending wake events, disable PME */
3173 pci_enable_wake(pdev, PCI_D0, 0);
3175 netif_device_attach(netdev);
3176 if (netif_running(netdev)) {
3178 mod_timer(&nic->watchdog, jiffies);
3182 static const struct pci_error_handlers e100_err_handler = {
3183 .error_detected = e100_io_error_detected,
3184 .slot_reset = e100_io_slot_reset,
3185 .resume = e100_io_resume,
3188 static struct pci_driver e100_driver = {
3190 .id_table = e100_id_table,
3191 .probe = e100_probe,
3192 .remove = e100_remove,
3194 /* Power Management hooks */
3195 .suspend = e100_suspend,
3196 .resume = e100_resume,
3198 .shutdown = e100_shutdown,
3199 .err_handler = &e100_err_handler,
3202 static int __init e100_init_module(void)
3204 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3205 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3206 pr_info("%s\n", DRV_COPYRIGHT);
3208 return pci_register_driver(&e100_driver);
3211 static void __exit e100_cleanup_module(void)
3213 pci_unregister_driver(&e100_driver);
3216 module_init(e100_init_module);
3217 module_exit(e100_cleanup_module);