1 /* winbond-840.c: A Linux PCI network adapter device driver. */
3 Written 1998-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
43 * enable pci_power_off
47 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49 #define DRV_NAME "winbond-840"
50 #define DRV_VERSION "1.01-e"
51 #define DRV_RELDATE "Sep-11-2006"
54 /* Automatically extracted configuration info:
55 probe-func: winbond840_probe
56 config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
58 c-help-name: Winbond W89c840 PCI Ethernet support
59 c-help-symbol: CONFIG_WINBOND_840
60 c-help: This driver is for the Winbond W89c840 chip. It also works with
61 c-help: the TX9882 chip on the Compex RL100-ATX board.
62 c-help: More specific information and updates are available from
63 c-help: http://www.scyld.com/network/drivers.html
66 /* The user-configurable values.
67 These may be modified when a driver module is loaded.*/
69 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
70 static int max_interrupt_work = 20;
71 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
72 The '840 uses a 64 element hash table based on the Ethernet CRC. */
73 static int multicast_filter_limit = 32;
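/* Each multicast address selects one bit of that 64-bit table: the bit index
   is the top six bits of the Ethernet CRC of the address, inverted (see the
   filbit computation in __set_rx_mode() below). */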
75 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
76 Setting to > 1518 effectively disables this feature. */
77 static int rx_copybreak;
79 /* Used to pass the media type, etc.
80 Both 'options[]' and 'full_duplex[]' should exist for driver
82 The media type is usually passed in 'options[]'.
84 #define MAX_UNITS 8 /* More are supported, limit only on options */
85 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
88 /* Operational parameters that are set at compile time. */
90 /* Keep the ring sizes a power of two for compile efficiency.
91 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
92 Making the Tx ring too large decreases the effectiveness of channel
93 bonding and packet priority.
94 There are no ill effects from too-large receive rings. */
95 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
96 #define TX_QUEUE_LEN_RESTART 5
98 #define TX_BUFLIMIT (1024-128)
100 /* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101 To avoid overflowing we don't queue again until we have room for a
104 #define TX_FIFO_SIZE (2048)
105 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
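/* With the values above this limit works out to 2048 - 1514 - 16 = 518 bytes;
   on chips flagged HasBrokenTx, start_tx() stops the queue once tx_q_bytes
   exceeds it, so the FIFO always has room for one more maximum-sized frame. */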
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
112 /* Include files, designed to support most kernel versions 2.0.0 and later. */
113 #include <linux/module.h>
114 #include <linux/kernel.h>
115 #include <linux/string.h>
116 #include <linux/timer.h>
117 #include <linux/errno.h>
118 #include <linux/ioport.h>
119 #include <linux/interrupt.h>
120 #include <linux/pci.h>
121 #include <linux/dma-mapping.h>
122 #include <linux/netdevice.h>
123 #include <linux/etherdevice.h>
124 #include <linux/skbuff.h>
125 #include <linux/init.h>
126 #include <linux/delay.h>
127 #include <linux/ethtool.h>
128 #include <linux/mii.h>
129 #include <linux/rtnetlink.h>
130 #include <linux/crc32.h>
131 #include <linux/bitops.h>
132 #include <linux/uaccess.h>
133 #include <asm/processor.h> /* Processor type for cache alignment. */
139 #undef PKT_BUF_SZ /* tulip.h also defines this */
140 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
142 /* These identify the driver base version and may not be removed. */
143 static const char version[] __initconst =
144 "v" DRV_VERSION " (2.4 port) "
145 DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
146 " http://www.scyld.com/network/drivers.html\n";
148 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
149 MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
150 MODULE_LICENSE("GPL");
151 MODULE_VERSION(DRV_VERSION);
153 module_param(max_interrupt_work, int, 0);
154 module_param(debug, int, 0);
155 module_param(rx_copybreak, int, 0);
156 module_param(multicast_filter_limit, int, 0);
157 module_param_array(options, int, NULL, 0);
158 module_param_array(full_duplex, int, NULL, 0);
159 MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
160 MODULE_PARM_DESC(debug, "winbond-840 debug level (0-7)");
161 MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
162 MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
163 MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
164 MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
169 I. Board Compatibility
171 This driver is for the Winbond w89c840 chip.
173 II. Board-specific settings
177 III. Driver operation
179 This chip is very similar to the Digital 21*4* "Tulip" family. The first
180 twelve registers and the descriptor format are nearly identical. Read a
181 Tulip manual for operational details.
183 A significant difference is that the multicast filter and station address are
184 stored in registers rather than loaded through a pseudo-transmit packet.
186 Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
187 full-sized packet we must use both data buffers in a descriptor. Thus the
188 driver uses ring mode where descriptors are implicitly sequential in memory,
189 rather than using the second descriptor address as a chain pointer to
190 subsequent descriptors.
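As an illustration of what start_tx() does: a 1514 byte frame is split at
TX_BUFLIMIT (896 bytes), so buffer1 holds the first 896 bytes, buffer2 the
remaining 618, and both lengths are packed into the descriptor's single
length word.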
194 If you are going to almost clone a Tulip, why not go all the way and avoid
195 the need for a new driver?
199 http://www.scyld.com/expert/100mbps.html
200 http://www.scyld.com/expert/NWay.html
201 http://www.winbond.com.tw/
205 A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
206 correctly detect a full FIFO, and queuing more than 2048 bytes may result in
207 silent data corruption.
209 Test with 'ping -s 10000' on a fast computer.
218 enum chip_capability_flags {
219 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
222 static const struct pci_device_id w840_pci_tbl[] = {
223 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
224 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
225 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
228 MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
231 netdev_res_size = 128, /* size of PCI BAR resource */
236 int drv_flags; /* Driver use, intended as capability flags. */
239 static const struct pci_id_info pci_id_tbl[] = {
240 { /* Sometime a Level-One switch card. */
241 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
242 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
243 { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
244 { } /* terminate list. */
247 /* This driver was written to use PCI memory space; however, some x86 systems
248 work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
251 /* Offsets to the Command and Status Registers, "CSRs".
252 While similar to the Tulip, these registers are longword aligned.
253 Note: It's not useful to define symbolic names for every register bit in
254 the device. The name can only partially document the semantics and make
255 the driver longer and more difficult to read.
258 PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
259 RxRingPtr=0x0C, TxRingPtr=0x10,
260 IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
261 RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
262 CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
263 MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
264 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
267 /* Bits in the NetworkConfig register. */
270 RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
271 RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
275 MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
276 MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
279 /* The Tulip Rx and Tx buffer descriptors. */
280 struct w840_rx_desc {
287 struct w840_tx_desc {
290 u32 buffer1, buffer2;
293 #define MII_CNT 1 /* winbond only supports one MII */
294 struct netdev_private {
295 struct w840_rx_desc *rx_ring;
296 dma_addr_t rx_addr[RX_RING_SIZE];
297 struct w840_tx_desc *tx_ring;
298 dma_addr_t tx_addr[TX_RING_SIZE];
299 dma_addr_t ring_dma_addr;
300 /* The addresses of receive-in-place skbuffs. */
301 struct sk_buff* rx_skbuff[RX_RING_SIZE];
302 /* The saved address of a sent-in-place packet/buffer, for later free(). */
303 struct sk_buff* tx_skbuff[TX_RING_SIZE];
304 struct net_device_stats stats;
305 struct timer_list timer; /* Media monitoring timer. */
306 /* Frequently used values: keep some adjacent for cache effect. */
308 int chip_id, drv_flags;
309 struct pci_dev *pci_dev;
311 struct w840_rx_desc *rx_head_desc;
312 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
313 unsigned int rx_buf_sz; /* Based on MTU+slack. */
314 unsigned int cur_tx, dirty_tx;
315 unsigned int tx_q_bytes;
316 unsigned int tx_full; /* The Tx queue is full. */
317 /* MII transceiver section. */
318 int mii_cnt; /* Number of MII PHYs found. */
319 unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
321 struct mii_if_info mii_if;
322 void __iomem *base_addr;
325 static int eeprom_read(void __iomem *ioaddr, int location);
326 static int mdio_read(struct net_device *dev, int phy_id, int location);
327 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
328 static int netdev_open(struct net_device *dev);
329 static int update_link(struct net_device *dev);
330 static void netdev_timer(struct timer_list *t);
331 static void init_rxtx_rings(struct net_device *dev);
332 static void free_rxtx_rings(struct netdev_private *np);
333 static void init_registers(struct net_device *dev);
334 static void tx_timeout(struct net_device *dev);
335 static int alloc_ringdesc(struct net_device *dev);
336 static void free_ringdesc(struct netdev_private *np);
337 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
338 static irqreturn_t intr_handler(int irq, void *dev_instance);
339 static void netdev_error(struct net_device *dev, int intr_status);
340 static int netdev_rx(struct net_device *dev);
341 static u32 __set_rx_mode(struct net_device *dev);
342 static void set_rx_mode(struct net_device *dev);
343 static struct net_device_stats *get_stats(struct net_device *dev);
344 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
345 static const struct ethtool_ops netdev_ethtool_ops;
346 static int netdev_close(struct net_device *dev);
348 static const struct net_device_ops netdev_ops = {
349 .ndo_open = netdev_open,
350 .ndo_stop = netdev_close,
351 .ndo_start_xmit = start_tx,
352 .ndo_get_stats = get_stats,
353 .ndo_set_rx_mode = set_rx_mode,
354 .ndo_do_ioctl = netdev_ioctl,
355 .ndo_tx_timeout = tx_timeout,
356 .ndo_set_mac_address = eth_mac_addr,
357 .ndo_validate_addr = eth_validate_addr,
360 static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
362 struct net_device *dev;
363 struct netdev_private *np;
365 int chip_idx = ent->driver_data;
367 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
368 void __iomem *ioaddr;
370 i = pcim_enable_device(pdev);
373 pci_set_master(pdev);
377 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
378 pr_warn("Device %s disabled due to DMA limitations\n",
382 dev = alloc_etherdev(sizeof(*np));
385 SET_NETDEV_DEV(dev, &pdev->dev);
387 if (pci_request_regions(pdev, DRV_NAME))
390 ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
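/* The station address occupies the first three 16-bit words of the EEPROM,
   stored little-endian. */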
394 for (i = 0; i < 3; i++)
395 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
397 /* Reset the chip to erase previous misconfiguration.
398 No hold time required! */
399 iowrite32(0x00000001, ioaddr + PCIBusCfg);
401 np = netdev_priv(dev);
403 np->chip_id = chip_idx;
404 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
405 spin_lock_init(&np->lock);
406 np->mii_if.dev = dev;
407 np->mii_if.mdio_read = mdio_read;
408 np->mii_if.mdio_write = mdio_write;
409 np->base_addr = ioaddr;
411 pci_set_drvdata(pdev, dev);
414 option = dev->mem_start;
416 /* The lower four bits are the media type. */
419 np->mii_if.full_duplex = 1;
422 "ignoring user supplied media type %d",
425 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
426 np->mii_if.full_duplex = 1;
428 if (np->mii_if.full_duplex)
429 np->mii_if.force_media = 1;
431 /* The chip-specific entries in the device structure. */
432 dev->netdev_ops = &netdev_ops;
433 dev->ethtool_ops = &netdev_ethtool_ops;
434 dev->watchdog_timeo = TX_TIMEOUT;
436 i = register_netdev(dev);
438 goto err_out_cleardev;
440 dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
441 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
443 if (np->drv_flags & CanHaveMII) {
444 int phy, phy_idx = 0;
445 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
446 int mii_status = mdio_read(dev, phy, MII_BMSR);
447 if (mii_status != 0xffff && mii_status != 0x0000) {
448 np->phys[phy_idx++] = phy;
449 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
450 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
451 mdio_read(dev, phy, MII_PHYSID2);
453 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
454 np->mii, phy, mii_status,
455 np->mii_if.advertising);
458 np->mii_cnt = phy_idx;
459 np->mii_if.phy_id = np->phys[0];
462 "MII PHY not found -- this device may not operate correctly\n");
470 pci_iounmap(pdev, ioaddr);
477 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
478 often serial bit streams generated by the host processor.
479 The example below is for the common 93c46 EEPROM, 64 16-bit words. */
481 /* Delay between EEPROM clock transitions.
482 No extra delay is needed with 33MHz PCI, but future 66MHz access may need
483 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
484 made udelay() unreliable.
485 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
488 #define eeprom_delay(ee_addr) ioread32(ee_addr)
490 enum EEPROM_Ctrl_Bits {
491 EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
492 EE_ChipSelect=0x801, EE_DataIn=0x08,
495 /* The EEPROM commands include the always-set leading bit. */
497 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
500 static int eeprom_read(void __iomem *addr, int location)
504 void __iomem *ee_addr = addr + EECtrl;
505 int read_cmd = location | EE_ReadCmd;
506 iowrite32(EE_ChipSelect, ee_addr);
508 /* Shift the read command bits out. */
509 for (i = 10; i >= 0; i--) {
510 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
511 iowrite32(dataval, ee_addr);
512 eeprom_delay(ee_addr);
513 iowrite32(dataval | EE_ShiftClk, ee_addr);
514 eeprom_delay(ee_addr);
516 iowrite32(EE_ChipSelect, ee_addr);
517 eeprom_delay(ee_addr);
519 for (i = 16; i > 0; i--) {
520 iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
521 eeprom_delay(ee_addr);
522 retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
523 iowrite32(EE_ChipSelect, ee_addr);
524 eeprom_delay(ee_addr);
527 /* Terminate the EEPROM access. */
528 iowrite32(0, ee_addr);
532 /* MII transceiver control section.
533 Read and write the MII registers using software-generated serial
534 MDIO protocol. See the MII specifications or DP83840A data sheet
537 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
538 met by back-to-back 33MHz PCI cycles. */
539 #define mdio_delay(mdio_addr) ioread32(mdio_addr)
541 /* Set iff a MII transceiver on any interface requires mdio preamble.
542 This is only needed with older transceivers, so the extra
543 code size of a per-interface flag is not worthwhile.
544 static char mii_preamble_required = 1;
546 #define MDIO_WRITE0 (MDIO_EnbOutput)
547 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
549 /* Generate the preamble required for initial synchronization and
550 a few older transceivers. */
551 static void mdio_sync(void __iomem *mdio_addr)
555 /* Establish sync by sending at least 32 logic ones. */
556 while (--bits >= 0) {
557 iowrite32(MDIO_WRITE1, mdio_addr);
558 mdio_delay(mdio_addr);
559 iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
560 mdio_delay(mdio_addr);
564 static int mdio_read(struct net_device *dev, int phy_id, int location)
566 struct netdev_private *np = netdev_priv(dev);
567 void __iomem *mdio_addr = np->base_addr + MIICtrl;
568 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
571 if (mii_preamble_required)
572 mdio_sync(mdio_addr);
574 /* Shift the read command bits out. */
575 for (i = 15; i >= 0; i--) {
576 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
578 iowrite32(dataval, mdio_addr);
579 mdio_delay(mdio_addr);
580 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
581 mdio_delay(mdio_addr);
583 /* Read the two transition, 16 data, and wire-idle bits. */
584 for (i = 20; i > 0; i--) {
585 iowrite32(MDIO_EnbIn, mdio_addr);
586 mdio_delay(mdio_addr);
587 retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
588 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
589 mdio_delay(mdio_addr);
591 return (retval>>1) & 0xffff;
594 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
596 struct netdev_private *np = netdev_priv(dev);
597 void __iomem *mdio_addr = np->base_addr + MIICtrl;
598 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
601 if (location == 4 && phy_id == np->phys[0])
602 np->mii_if.advertising = value;
604 if (mii_preamble_required)
605 mdio_sync(mdio_addr);
607 /* Shift the command bits out. */
608 for (i = 31; i >= 0; i--) {
609 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
611 iowrite32(dataval, mdio_addr);
612 mdio_delay(mdio_addr);
613 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
614 mdio_delay(mdio_addr);
616 /* Clear out extra bits. */
617 for (i = 2; i > 0; i--) {
618 iowrite32(MDIO_EnbIn, mdio_addr);
619 mdio_delay(mdio_addr);
620 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
621 mdio_delay(mdio_addr);
626 static int netdev_open(struct net_device *dev)
628 struct netdev_private *np = netdev_priv(dev);
629 void __iomem *ioaddr = np->base_addr;
630 const int irq = np->pci_dev->irq;
633 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
635 netif_device_detach(dev);
636 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
641 netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
643 if((i=alloc_ringdesc(dev)))
646 spin_lock_irq(&np->lock);
647 netif_device_attach(dev);
649 spin_unlock_irq(&np->lock);
651 netif_start_queue(dev);
653 netdev_dbg(dev, "Done netdev_open()\n");
655 /* Set the timer to check for link beat. */
656 timer_setup(&np->timer, netdev_timer, 0);
657 np->timer.expires = jiffies + 1*HZ;
658 add_timer(&np->timer);
661 netif_device_attach(dev);
665 #define MII_DAVICOM_DM9101 0x0181b800
667 static int update_link(struct net_device *dev)
669 struct netdev_private *np = netdev_priv(dev);
670 int duplex, fasteth, result, mii_reg;
673 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
675 if (mii_reg == 0xffff)
677 /* reread: the link status bit is sticky */
678 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
679 if (!(mii_reg & 0x4)) {
680 if (netif_carrier_ok(dev)) {
683 "MII #%d reports no link. Disabling watchdog\n",
685 netif_carrier_off(dev);
689 if (!netif_carrier_ok(dev)) {
692 "MII #%d link is back. Enabling watchdog\n",
694 netif_carrier_on(dev);
697 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
698 /* If the link partner doesn't support autonegotiation
699 * the MII detects its abilities with the "parallel detection".
700 * Some MIIs update the LPA register to the result of the parallel
701 * detection, some don't.
702 * The Davicom PHY [at least 0181b800] doesn't.
703 * Instead bits 8 and 13 of the BMCR are updated to the result
704 * of the negotiation.
706 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
707 duplex = mii_reg & BMCR_FULLDPLX;
708 fasteth = mii_reg & BMCR_SPEED100;
711 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
712 negotiated = mii_reg & np->mii_if.advertising;
714 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
715 fasteth = negotiated & 0x380;
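/* 0x380 = LPA_100HALF | LPA_100FULL | LPA_100BASE4, i.e. any 100mbit
   ability; (negotiated & 0x02C0) == LPA_10FULL means 10FULL was agreed
   while no 100HALF/100BASE4 ability is common. */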
717 duplex |= np->mii_if.force_media;
718 /* remove the fast ethernet and full duplex bits */
719 result = np->csr6 & ~0x20000200;
723 result |= 0x20000000;
724 if (result != np->csr6 && debug)
726 "Setting %dMBit-%s-duplex based on MII#%d\n",
727 fasteth ? 100 : 10, duplex ? "full" : "half",
732 #define RXTX_TIMEOUT 2000
733 static inline void update_csr6(struct net_device *dev, int new)
735 struct netdev_private *np = netdev_priv(dev);
736 void __iomem *ioaddr = np->base_addr;
737 int limit = RXTX_TIMEOUT;
739 if (!netif_device_present(dev))
743 /* stop both Tx and Rx processes */
744 iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
745 /* wait until they have really stopped */
747 int csr5 = ioread32(ioaddr + IntrStatus);
750 t = (csr5 >> 17) & 0x07;
753 t = (csr5 >> 20) & 0x07;
761 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
767 /* and restart them with the new configuration */
768 iowrite32(np->csr6, ioaddr + NetworkConfig);
770 np->mii_if.full_duplex = 1;
773 static void netdev_timer(struct timer_list *t)
775 struct netdev_private *np = from_timer(np, t, timer);
776 struct net_device *dev = pci_get_drvdata(np->pci_dev);
777 void __iomem *ioaddr = np->base_addr;
780 netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
781 ioread32(ioaddr + IntrStatus),
782 ioread32(ioaddr + NetworkConfig));
783 spin_lock_irq(&np->lock);
784 update_csr6(dev, update_link(dev));
785 spin_unlock_irq(&np->lock);
786 np->timer.expires = jiffies + 10*HZ;
787 add_timer(&np->timer);
790 static void init_rxtx_rings(struct net_device *dev)
792 struct netdev_private *np = netdev_priv(dev);
795 np->rx_head_desc = &np->rx_ring[0];
796 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
798 /* Initialize all Rx descriptors. */
799 for (i = 0; i < RX_RING_SIZE; i++) {
800 np->rx_ring[i].length = np->rx_buf_sz;
801 np->rx_ring[i].status = 0;
802 np->rx_skbuff[i] = NULL;
804 /* Mark the last entry as wrapping the ring. */
805 np->rx_ring[i-1].length |= DescEndRing;
807 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
808 for (i = 0; i < RX_RING_SIZE; i++) {
809 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
810 np->rx_skbuff[i] = skb;
813 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
814 np->rx_buf_sz,PCI_DMA_FROMDEVICE);
816 np->rx_ring[i].buffer1 = np->rx_addr[i];
817 np->rx_ring[i].status = DescOwned;
821 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
823 /* Initialize the Tx descriptors */
824 for (i = 0; i < TX_RING_SIZE; i++) {
825 np->tx_skbuff[i] = NULL;
826 np->tx_ring[i].status = 0;
829 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
831 iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
832 iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
833 np->base_addr + TxRingPtr);
837 static void free_rxtx_rings(struct netdev_private* np)
840 /* Free all the skbuffs in the Rx queue. */
841 for (i = 0; i < RX_RING_SIZE; i++) {
842 np->rx_ring[i].status = 0;
843 if (np->rx_skbuff[i]) {
844 pci_unmap_single(np->pci_dev,
846 np->rx_skbuff[i]->len,
848 dev_kfree_skb(np->rx_skbuff[i]);
850 np->rx_skbuff[i] = NULL;
852 for (i = 0; i < TX_RING_SIZE; i++) {
853 if (np->tx_skbuff[i]) {
854 pci_unmap_single(np->pci_dev,
856 np->tx_skbuff[i]->len,
858 dev_kfree_skb(np->tx_skbuff[i]);
860 np->tx_skbuff[i] = NULL;
864 static void init_registers(struct net_device *dev)
866 struct netdev_private *np = netdev_priv(dev);
867 void __iomem *ioaddr = np->base_addr;
870 for (i = 0; i < 6; i++)
871 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
873 /* Initialize other registers. */
875 i = (1<<20); /* Big-endian descriptors */
879 i |= (0x04<<2); /* skip length 4 u32 */
880 i |= 0x02; /* give Rx priority */
882 /* Configure the PCI bus bursts and FIFO thresholds.
883 486: Set 8 longword cache alignment, 8 longword burst.
884 586: Set 16 longword cache alignment, no burst limit.
885 Cache alignment bits 15:14 Burst length 13:8
886 0000 <not allowed> 0000 align to cache 0800 8 longwords
887 4000 8 longwords 0100 1 longword 1000 16 longwords
888 8000 16 longwords 0200 2 longwords 2000 32 longwords
889 C000 32 longwords 0400 4 longwords */
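/* For example, 0x4800 selects 8-longword cache alignment with an 8-longword
   burst (the '486 combination recommended above), and 0xA000 would select
   16-longword alignment with a 32-longword burst. */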
891 #if defined (__i386__) && !defined(MODULE)
892 /* When not a module we can work around broken '486 PCI boards. */
893 if (boot_cpu_data.x86 <= 4) {
896 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
900 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
902 #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
905 dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
908 iowrite32(i, ioaddr + PCIBusCfg);
911 /* 128 byte Tx threshold;
912 Transmit on; Receive on; */
913 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
915 /* Clear and Enable interrupts by setting the interrupt mask. */
916 iowrite32(0x1A0F5, ioaddr + IntrStatus);
917 iowrite32(0x1A0F5, ioaddr + IntrEnable);
919 iowrite32(0, ioaddr + RxStartDemand);
922 static void tx_timeout(struct net_device *dev)
924 struct netdev_private *np = netdev_priv(dev);
925 void __iomem *ioaddr = np->base_addr;
926 const int irq = np->pci_dev->irq;
928 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
929 ioread32(ioaddr + IntrStatus));
933 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
934 for (i = 0; i < RX_RING_SIZE; i++)
935 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
936 printk(KERN_CONT "\n");
937 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
938 for (i = 0; i < TX_RING_SIZE; i++)
939 printk(KERN_CONT " %08x", np->tx_ring[i].status);
940 printk(KERN_CONT "\n");
942 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
943 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
944 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
947 spin_lock_irq(&np->lock);
949 * Under high load dirty_tx and the internal tx descriptor pointer
950 * come out of sync, thus perform a software reset and reinitialize
954 iowrite32(1, np->base_addr+PCIBusCfg);
958 init_rxtx_rings(dev);
960 spin_unlock_irq(&np->lock);
963 netif_wake_queue(dev);
964 netif_trans_update(dev); /* prevent tx timeout */
965 np->stats.tx_errors++;
968 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
969 static int alloc_ringdesc(struct net_device *dev)
971 struct netdev_private *np = netdev_priv(dev);
973 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
975 np->rx_ring = pci_alloc_consistent(np->pci_dev,
976 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
977 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
981 init_rxtx_rings(dev);
985 static void free_ringdesc(struct netdev_private *np)
987 pci_free_consistent(np->pci_dev,
988 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
989 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
990 np->rx_ring, np->ring_dma_addr);
994 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
996 struct netdev_private *np = netdev_priv(dev);
999 /* Caution: the write order is important here, set the field
1000 with the "ownership" bits last. */
1002 /* Calculate the next Tx descriptor entry. */
1003 entry = np->cur_tx % TX_RING_SIZE;
1005 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1006 skb->data,skb->len, PCI_DMA_TODEVICE);
1007 np->tx_skbuff[entry] = skb;
1009 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
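/* Tulip-style length word: the buffer1 size lives in bits 10:0 and the
   buffer2 size in bits 21:11, so a split frame is described below by
   (len << 11) | TX_BUFLIMIT. */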
1010 if (skb->len < TX_BUFLIMIT) {
1011 np->tx_ring[entry].length = DescWholePkt | skb->len;
1013 int len = skb->len - TX_BUFLIMIT;
1015 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1016 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1018 if(entry == TX_RING_SIZE-1)
1019 np->tx_ring[entry].length |= DescEndRing;
1021 /* Now acquire the irq spinlock.
1022 * The difficult race is the ordering between
1023 * increasing np->cur_tx and setting DescOwned:
1024 * - if np->cur_tx is increased first the interrupt
1025 * handler could consider the packet as transmitted
1026 * since DescOwned is cleared.
1027 * - If DescOwned is set first the NIC could report the
1028 * packet as sent, but the interrupt handler would ignore it
1029 * since the np->cur_tx was not yet increased.
1031 spin_lock_irq(&np->lock);
1034 wmb(); /* flush length, buffer1, buffer2 */
1035 np->tx_ring[entry].status = DescOwned;
1036 wmb(); /* flush status and kick the hardware */
1037 iowrite32(0, np->base_addr + TxStartDemand);
1038 np->tx_q_bytes += skb->len;
1039 /* Work around horrible bug in the chip by marking the queue as full
1040 when we do not have FIFO room for a maximum sized packet. */
1041 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1042 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1043 netif_stop_queue(dev);
1047 spin_unlock_irq(&np->lock);
1050 netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1053 return NETDEV_TX_OK;
1056 static void netdev_tx_done(struct net_device *dev)
1058 struct netdev_private *np = netdev_priv(dev);
1059 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1060 int entry = np->dirty_tx % TX_RING_SIZE;
1061 int tx_status = np->tx_ring[entry].status;
1065 if (tx_status & 0x8000) { /* There was an error, log it. */
1066 #ifndef final_version
1068 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1071 np->stats.tx_errors++;
1072 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1073 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1074 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1075 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1076 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1077 np->stats.tx_heartbeat_errors++;
1079 #ifndef final_version
1081 netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
1084 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1085 np->stats.collisions += (tx_status >> 3) & 15;
1086 np->stats.tx_packets++;
1088 /* Free the original skb. */
1089 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1090 np->tx_skbuff[entry]->len,
1092 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1093 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1094 np->tx_skbuff[entry] = NULL;
1097 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1098 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1099 /* The ring is no longer full, clear tbusy. */
1102 netif_wake_queue(dev);
1106 /* The interrupt handler does all of the Rx thread work and cleans up
1107 after the Tx thread. */
1108 static irqreturn_t intr_handler(int irq, void *dev_instance)
1110 struct net_device *dev = (struct net_device *)dev_instance;
1111 struct netdev_private *np = netdev_priv(dev);
1112 void __iomem *ioaddr = np->base_addr;
1113 int work_limit = max_interrupt_work;
1116 if (!netif_device_present(dev))
1119 u32 intr_status = ioread32(ioaddr + IntrStatus);
1121 /* Acknowledge all of the current interrupt sources ASAP. */
1122 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1125 netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);
1127 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1132 if (intr_status & (RxIntr | RxNoBuf))
1134 if (intr_status & RxNoBuf)
1135 iowrite32(0, ioaddr + RxStartDemand);
1137 if (intr_status & (TxNoBuf | TxIntr) &&
1138 np->cur_tx != np->dirty_tx) {
1139 spin_lock(&np->lock);
1140 netdev_tx_done(dev);
1141 spin_unlock(&np->lock);
1144 /* Abnormal error summary/uncommon events handlers. */
1145 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
1147 netdev_error(dev, intr_status);
1149 if (--work_limit < 0) {
1151 "Too much work at interrupt, status=0x%04x\n",
1153 /* Set the timer to re-enable the other interrupts after
1155 spin_lock(&np->lock);
1156 if (netif_device_present(dev)) {
1157 iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1158 iowrite32(10, ioaddr + GPTimer);
1160 spin_unlock(&np->lock);
1166 netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
1167 ioread32(ioaddr + IntrStatus));
1168 return IRQ_RETVAL(handled);
1171 /* This routine is logically part of the interrupt handler, but separated
1172 for clarity and better register allocation. */
1173 static int netdev_rx(struct net_device *dev)
1175 struct netdev_private *np = netdev_priv(dev);
1176 int entry = np->cur_rx % RX_RING_SIZE;
1177 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1180 netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
1181 entry, np->rx_ring[entry].status);
1184 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1185 while (--work_limit >= 0) {
1186 struct w840_rx_desc *desc = np->rx_head_desc;
1187 s32 status = desc->status;
1190 netdev_dbg(dev, " netdev_rx() status was %08x\n",
1194 if ((status & 0x38008300) != 0x0300) {
1195 if ((status & 0x38000300) != 0x0300) {
1196 /* Ignore earlier buffers. */
1197 if ((status & 0xffff) != 0x7fff) {
1199 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
1200 np->cur_rx, status);
1201 np->stats.rx_length_errors++;
1203 } else if (status & 0x8000) {
1204 /* There was a fatal error. */
1206 netdev_dbg(dev, "Receive error, Rx status %08x\n",
1208 np->stats.rx_errors++; /* end of a packet.*/
1209 if (status & 0x0890) np->stats.rx_length_errors++;
1210 if (status & 0x004C) np->stats.rx_frame_errors++;
1211 if (status & 0x0002) np->stats.rx_crc_errors++;
1214 struct sk_buff *skb;
1215 /* Omit the four octet CRC from the length. */
1216 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1218 #ifndef final_version
1220 netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
1223 /* Check if the packet is long enough to accept without copying
1224 to a minimally-sized skbuff. */
1225 if (pkt_len < rx_copybreak &&
1226 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1227 skb_reserve(skb, 2); /* 16 byte align the IP header */
1228 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1229 np->rx_skbuff[entry]->len,
1230 PCI_DMA_FROMDEVICE);
1231 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1232 skb_put(skb, pkt_len);
1233 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1234 np->rx_skbuff[entry]->len,
1235 PCI_DMA_FROMDEVICE);
1237 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1238 np->rx_skbuff[entry]->len,
1239 PCI_DMA_FROMDEVICE);
1240 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1241 np->rx_skbuff[entry] = NULL;
1243 #ifndef final_version /* Remove after testing. */
1244 /* You will want this info for the initial debug. */
1246 netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
1247 &skb->data[0], &skb->data[6],
1248 skb->data[12], skb->data[13],
1251 skb->protocol = eth_type_trans(skb, dev);
1253 np->stats.rx_packets++;
1254 np->stats.rx_bytes += pkt_len;
1256 entry = (++np->cur_rx) % RX_RING_SIZE;
1257 np->rx_head_desc = &np->rx_ring[entry];
1260 /* Refill the Rx ring buffers. */
1261 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1262 struct sk_buff *skb;
1263 entry = np->dirty_rx % RX_RING_SIZE;
1264 if (np->rx_skbuff[entry] == NULL) {
1265 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1266 np->rx_skbuff[entry] = skb;
1268 break; /* Better luck next round. */
1269 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1271 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1272 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1275 np->rx_ring[entry].status = DescOwned;
1281 static void netdev_error(struct net_device *dev, int intr_status)
1283 struct netdev_private *np = netdev_priv(dev);
1284 void __iomem *ioaddr = np->base_addr;
1287 netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
1288 if (intr_status == 0xffffffff)
1290 spin_lock(&np->lock);
1291 if (intr_status & TxFIFOUnderflow) {
1293 /* Bump up the Tx threshold */
1295 /* This causes lots of dropped packets,
1296 * and under high load even tx_timeouts
1298 new = np->csr6 + 0x4000;
1300 new = (np->csr6 >> 14)&0x7f;
1304 new = 127; /* load full packet before starting */
1305 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1307 netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
1308 update_csr6(dev, new);
1310 if (intr_status & RxDied) { /* Missed a Rx frame. */
1311 np->stats.rx_errors++;
1313 if (intr_status & TimerInt) {
1314 /* Re-enable other interrupts. */
1315 if (netif_device_present(dev))
1316 iowrite32(0x1A0F5, ioaddr + IntrEnable);
1318 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1319 iowrite32(0, ioaddr + RxStartDemand);
1320 spin_unlock(&np->lock);
1323 static struct net_device_stats *get_stats(struct net_device *dev)
1325 struct netdev_private *np = netdev_priv(dev);
1326 void __iomem *ioaddr = np->base_addr;
1328 /* The chip only needs to report frames it silently dropped. */
1329 spin_lock_irq(&np->lock);
1330 if (netif_running(dev) && netif_device_present(dev))
1331 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1332 spin_unlock_irq(&np->lock);
1338 static u32 __set_rx_mode(struct net_device *dev)
1340 struct netdev_private *np = netdev_priv(dev);
1341 void __iomem *ioaddr = np->base_addr;
1342 u32 mc_filter[2]; /* Multicast hash filter */
1345 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1346 memset(mc_filter, 0xff, sizeof(mc_filter));
1347 rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1349 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1350 (dev->flags & IFF_ALLMULTI)) {
1351 /* Too many to match, or accept all multicasts. */
1352 memset(mc_filter, 0xff, sizeof(mc_filter));
1353 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1355 struct netdev_hw_addr *ha;
1357 memset(mc_filter, 0, sizeof(mc_filter));
1358 netdev_for_each_mc_addr(ha, dev) {
1361 filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1363 mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1365 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1367 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1368 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1372 static void set_rx_mode(struct net_device *dev)
1374 struct netdev_private *np = netdev_priv(dev);
1375 u32 rx_mode = __set_rx_mode(dev);
1376 spin_lock_irq(&np->lock);
1377 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1378 spin_unlock_irq(&np->lock);
1381 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1383 struct netdev_private *np = netdev_priv(dev);
1385 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1386 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1387 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1390 static int netdev_get_link_ksettings(struct net_device *dev,
1391 struct ethtool_link_ksettings *cmd)
1393 struct netdev_private *np = netdev_priv(dev);
1395 spin_lock_irq(&np->lock);
1396 mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1397 spin_unlock_irq(&np->lock);
1402 static int netdev_set_link_ksettings(struct net_device *dev,
1403 const struct ethtool_link_ksettings *cmd)
1405 struct netdev_private *np = netdev_priv(dev);
1408 spin_lock_irq(&np->lock);
1409 rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1410 spin_unlock_irq(&np->lock);
1415 static int netdev_nway_reset(struct net_device *dev)
1417 struct netdev_private *np = netdev_priv(dev);
1418 return mii_nway_restart(&np->mii_if);
1421 static u32 netdev_get_link(struct net_device *dev)
1423 struct netdev_private *np = netdev_priv(dev);
1424 return mii_link_ok(&np->mii_if);
1427 static u32 netdev_get_msglevel(struct net_device *dev)
1432 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1437 static const struct ethtool_ops netdev_ethtool_ops = {
1438 .get_drvinfo = netdev_get_drvinfo,
1439 .nway_reset = netdev_nway_reset,
1440 .get_link = netdev_get_link,
1441 .get_msglevel = netdev_get_msglevel,
1442 .set_msglevel = netdev_set_msglevel,
1443 .get_link_ksettings = netdev_get_link_ksettings,
1444 .set_link_ksettings = netdev_set_link_ksettings,
1447 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1449 struct mii_ioctl_data *data = if_mii(rq);
1450 struct netdev_private *np = netdev_priv(dev);
1453 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1454 data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
1457 case SIOCGMIIREG: /* Read MII PHY register. */
1458 spin_lock_irq(&np->lock);
1459 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1460 spin_unlock_irq(&np->lock);
1463 case SIOCSMIIREG: /* Write MII PHY register. */
1464 spin_lock_irq(&np->lock);
1465 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1466 spin_unlock_irq(&np->lock);
1473 static int netdev_close(struct net_device *dev)
1475 struct netdev_private *np = netdev_priv(dev);
1476 void __iomem *ioaddr = np->base_addr;
1478 netif_stop_queue(dev);
1481 netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
1482 ioread32(ioaddr + IntrStatus),
1483 ioread32(ioaddr + NetworkConfig));
1484 netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
1485 np->cur_tx, np->dirty_tx,
1486 np->cur_rx, np->dirty_rx);
1489 /* Stop the chip's Tx and Rx processes. */
1490 spin_lock_irq(&np->lock);
1491 netif_device_detach(dev);
1492 update_csr6(dev, 0);
1493 iowrite32(0x0000, ioaddr + IntrEnable);
1494 spin_unlock_irq(&np->lock);
1496 free_irq(np->pci_dev->irq, dev);
1498 netif_device_attach(dev);
1500 if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
1501 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1507 printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
1508 for (i = 0; i < TX_RING_SIZE; i++)
1509 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1510 i, np->tx_ring[i].length,
1511 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1512 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1513 for (i = 0; i < RX_RING_SIZE; i++) {
1514 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1515 i, np->rx_ring[i].length,
1516 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1519 #endif /* __i386__ debugging only */
1521 del_timer_sync(&np->timer);
1523 free_rxtx_rings(np);
1529 static void w840_remove1(struct pci_dev *pdev)
1531 struct net_device *dev = pci_get_drvdata(pdev);
1534 struct netdev_private *np = netdev_priv(dev);
1535 unregister_netdev(dev);
1536 pci_iounmap(pdev, np->base_addr);
1544 * suspend/resume synchronization:
1545 * - open, close, do_ioctl:
1546 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1548 * spin_lock_irq(np->lock), doesn't touch hw if not present
1550 * synchronize_irq + netif_tx_disable;
1552 * netif_device_detach + netif_tx_disable;
1553 * - set_multicast_list
1554 * netif_device_detach + netif_tx_disable;
1555 * - interrupt handler
1556 * doesn't touch hw if not present, synchronize_irq waits for
1557 * running instances of the interrupt handler.
1559 * Disabling hw requires clearing csr6 & IntrEnable.
1560 * update_csr6 and all functions that write IntrEnable check netif_device_present
1561 * before setting any bits.
1563 * Detach must occur under spin_lock_irq(); interrupts from a detached
1564 * device would cause an irq storm.
1566 static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1568 struct net_device *dev = pci_get_drvdata (pdev);
1569 struct netdev_private *np = netdev_priv(dev);
1570 void __iomem *ioaddr = np->base_addr;
1573 if (netif_running (dev)) {
1574 del_timer_sync(&np->timer);
1576 spin_lock_irq(&np->lock);
1577 netif_device_detach(dev);
1578 update_csr6(dev, 0);
1579 iowrite32(0, ioaddr + IntrEnable);
1580 spin_unlock_irq(&np->lock);
1582 synchronize_irq(np->pci_dev->irq);
1583 netif_tx_disable(dev);
1585 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1587 /* no more hardware accesses behind this line. */
1589 BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1591 /* pci_power_off(pdev, -1); */
1593 free_rxtx_rings(np);
1595 netif_device_detach(dev);
1601 static int w840_resume (struct pci_dev *pdev)
1603 struct net_device *dev = pci_get_drvdata (pdev);
1604 struct netdev_private *np = netdev_priv(dev);
1608 if (netif_device_present(dev))
1609 goto out; /* device not suspended */
1610 if (netif_running(dev)) {
1611 if ((retval = pci_enable_device(pdev))) {
1613 "pci_enable_device failed in resume\n");
1616 spin_lock_irq(&np->lock);
1617 iowrite32(1, np->base_addr+PCIBusCfg);
1618 ioread32(np->base_addr+PCIBusCfg);
1620 netif_device_attach(dev);
1621 init_rxtx_rings(dev);
1622 init_registers(dev);
1623 spin_unlock_irq(&np->lock);
1625 netif_wake_queue(dev);
1627 mod_timer(&np->timer, jiffies + 1*HZ);
1629 netif_device_attach(dev);
1637 static struct pci_driver w840_driver = {
1639 .id_table = w840_pci_tbl,
1640 .probe = w840_probe1,
1641 .remove = w840_remove1,
1643 .suspend = w840_suspend,
1644 .resume = w840_resume,
1648 static int __init w840_init(void)
1651 return pci_register_driver(&w840_driver);
1654 static void __exit w840_exit(void)
1656 pci_unregister_driver(&w840_driver);
1659 module_init(w840_init);
1660 module_exit(w840_exit);