1 /* winbond-840.c: A Linux PCI network adapter device driver. */
3 Written 1998-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count as an improvement.
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
43 * enable pci_power_off
47 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49 #define DRV_NAME "winbond-840"
50 #define DRV_VERSION "1.01-e"
51 #define DRV_RELDATE "Sep-11-2006"
54 /* Automatically extracted configuration info:
55 probe-func: winbond840_probe
56 config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
58 c-help-name: Winbond W89c840 PCI Ethernet support
59 c-help-symbol: CONFIG_WINBOND_840
60 c-help: This driver is for the Winbond W89c840 chip. It also works with
61 c-help: the TX9882 chip on the Compex RL100-ATX board.
62 c-help: More specific information and updates are available from
63 c-help: http://www.scyld.com/network/drivers.html
66 /* The user-configurable values.
67 These may be modified when a driver module is loaded.*/
69 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
70 static int max_interrupt_work = 20;
71 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
72 The '840 uses a 64 element hash table based on the Ethernet CRC. */
73 static int multicast_filter_limit = 32;
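/* Illustrative sketch only (the driver's real code is in __set_rx_mode()
   below): each multicast address selects one of the 64 hash-filter bits
   from the top six bits of its Ethernet CRC, roughly:

	int bit = (ether_crc(ETH_ALEN, addr) >> 26) ^ 0x3F;
	mc_filter[bit >> 5] |= 1 << (bit & 31);
*/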
75 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
76 Setting to > 1518 effectively disables this feature. */
77 static int rx_copybreak;
79 /* Used to pass the media type, etc.
80 Both 'options[]' and 'full_duplex[]' should exist for driver interoperability.
82 The media type is usually passed in 'options[]'.
84 #define MAX_UNITS 8 /* More are supported, limit only on options */
85 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
88 /* Operational parameters that are set at compile time. */
90 /* Keep the ring sizes a power of two for compile efficiency.
91 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
92 Making the Tx ring too large decreases the effectiveness of channel
93 bonding and packet priority.
94 There are no ill effects from too-large receive rings. */
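/* For example, with TX_RING_SIZE a power of two the index computation used
   throughout the driver,

	entry = np->cur_tx % TX_RING_SIZE;

   compiles to a simple "& (TX_RING_SIZE - 1)" mask rather than a division. */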
95 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
96 #define TX_QUEUE_LEN_RESTART 5
98 #define TX_BUFLIMIT (1024-128)
100 /* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101 To avoid overflowing we don't queue again until we have room for a full-sized packet.
104 #define TX_FIFO_SIZE (2048)
105 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
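/* Sketch of how the limit above is applied (the actual check lives in
   start_tx()): when the bytes outstanding in the Tx queue no longer leave
   one maximum-sized frame of FIFO headroom, the queue is stopped:

	if ((np->drv_flags & HasBrokenTx) &&
	    np->tx_q_bytes > TX_BUG_FIFO_LIMIT)
		netif_stop_queue(dev);
*/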
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
112 /* Include files, designed to support most kernel versions 2.0.0 and later. */
113 #include <linux/module.h>
114 #include <linux/kernel.h>
115 #include <linux/string.h>
116 #include <linux/timer.h>
117 #include <linux/errno.h>
118 #include <linux/ioport.h>
119 #include <linux/interrupt.h>
120 #include <linux/pci.h>
121 #include <linux/dma-mapping.h>
122 #include <linux/netdevice.h>
123 #include <linux/etherdevice.h>
124 #include <linux/skbuff.h>
125 #include <linux/init.h>
126 #include <linux/delay.h>
127 #include <linux/ethtool.h>
128 #include <linux/mii.h>
129 #include <linux/rtnetlink.h>
130 #include <linux/crc32.h>
131 #include <linux/bitops.h>
132 #include <asm/uaccess.h>
133 #include <asm/processor.h> /* Processor type for cache alignment. */
139 #undef PKT_BUF_SZ /* tulip.h also defines this */
140 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
142 /* These identify the driver base version and may not be removed. */
143 static const char version[] __initconst =
144 "v" DRV_VERSION " (2.4 port) "
145 DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
146 " http://www.scyld.com/network/drivers.html\n";
148 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
149 MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
150 MODULE_LICENSE("GPL");
151 MODULE_VERSION(DRV_VERSION);
153 module_param(max_interrupt_work, int, 0);
154 module_param(debug, int, 0);
155 module_param(rx_copybreak, int, 0);
156 module_param(multicast_filter_limit, int, 0);
157 module_param_array(options, int, NULL, 0);
158 module_param_array(full_duplex, int, NULL, 0);
159 MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
160 MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
161 MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
162 MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
163 MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
164 MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
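/* Usage sketch (values are only examples): the parameters above are normally
   passed on the module command line, e.g.

	modprobe winbond-840 debug=2 rx_copybreak=200 full_duplex=1,0

   Array parameters such as full_duplex[] take one comma-separated value per
   adapter, in probe order. */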
169 I. Board Compatibility
171 This driver is for the Winbond w89c840 chip.
173 II. Board-specific settings
177 III. Driver operation
179 This chip is very similar to the Digital 21*4* "Tulip" family. The first
180 twelve registers and the descriptor format are nearly identical. Read a
181 Tulip manual for operational details.
183 A significant difference is that the multicast filter and station address are
184 stored in registers rather than loaded through a pseudo-transmit packet.
186 Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
187 full-sized packet we must use both data buffers in a descriptor. Thus the
188 driver uses ring mode where descriptors are implicitly sequential in memory,
189 rather than using the second descriptor address as a chain pointer to
190 subsequent descriptors.
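
For example, a full-sized 1514 byte frame is handed to the chip as two
pieces of a single descriptor, roughly as start_tx() does it below
('mapping' and 'len' stand for the frame's DMA address and total length):

	desc->buffer1 = mapping;			(first TX_BUFLIMIT bytes)
	desc->buffer2 = mapping + TX_BUFLIMIT;		(the remainder)
	desc->length  = DescWholePkt | ((len - TX_BUFLIMIT) << 11) | TX_BUFLIMIT;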
194 If you are going to almost clone a Tulip, why not go all the way and avoid
195 the need for a new driver?
199 http://www.scyld.com/expert/100mbps.html
200 http://www.scyld.com/expert/NWay.html
201 http://www.winbond.com.tw/
205 A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
206 correctly detect a full FIFO, and queuing more than 2048 bytes may result in
207 silent data corruption.
209 Test with 'ping -s 10000' on a fast computer.
218 enum chip_capability_flags {
219 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
222 static const struct pci_device_id w840_pci_tbl[] = {
223 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
224 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
225 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
228 MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
231 netdev_res_size = 128, /* size of PCI BAR resource */
236 int drv_flags; /* Driver use, intended as capability flags. */
239 static const struct pci_id_info pci_id_tbl[] = {
240 { /* Sometimes a Level-One switch card. */
241 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
242 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
243 { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
244 { } /* terminate list. */
247 /* This driver was written to use PCI memory space; however, some x86 systems
248 work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
251 /* Offsets to the Command and Status Registers, "CSRs".
252 While similar to the Tulip, these registers are longword aligned.
253 Note: It's not useful to define symbolic names for every register bit in
254 the device. The name can only partially document the semantics and make
255 the driver longer and more difficult to read.
258 PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
259 RxRingPtr=0x0C, TxRingPtr=0x10,
260 IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
261 RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
262 CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
263 MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
264 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
267 /* Bits in the NetworkConfig register. */
270 RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
271 RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
275 MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
276 MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
279 /* The Tulip Rx and Tx buffer descriptors. */
280 struct w840_rx_desc {
287 struct w840_tx_desc {
290 u32 buffer1, buffer2;
293 #define MII_CNT 1 /* winbond only supports one MII */
294 struct netdev_private {
295 struct w840_rx_desc *rx_ring;
296 dma_addr_t rx_addr[RX_RING_SIZE];
297 struct w840_tx_desc *tx_ring;
298 dma_addr_t tx_addr[TX_RING_SIZE];
299 dma_addr_t ring_dma_addr;
300 /* The addresses of receive-in-place skbuffs. */
301 struct sk_buff* rx_skbuff[RX_RING_SIZE];
302 /* The saved address of a sent-in-place packet/buffer, for later free(). */
303 struct sk_buff* tx_skbuff[TX_RING_SIZE];
304 struct net_device_stats stats;
305 struct timer_list timer; /* Media monitoring timer. */
306 /* Frequently used values: keep some adjacent for cache effect. */
308 int chip_id, drv_flags;
309 struct pci_dev *pci_dev;
311 struct w840_rx_desc *rx_head_desc;
312 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
313 unsigned int rx_buf_sz; /* Based on MTU+slack. */
314 unsigned int cur_tx, dirty_tx;
315 unsigned int tx_q_bytes;
316 unsigned int tx_full; /* The Tx queue is full. */
317 /* MII transceiver section. */
318 int mii_cnt; /* Number of MII transceivers found. */
319 unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
321 struct mii_if_info mii_if;
322 void __iomem *base_addr;
325 static int eeprom_read(void __iomem *ioaddr, int location);
326 static int mdio_read(struct net_device *dev, int phy_id, int location);
327 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
328 static int netdev_open(struct net_device *dev);
329 static int update_link(struct net_device *dev);
330 static void netdev_timer(unsigned long data);
331 static void init_rxtx_rings(struct net_device *dev);
332 static void free_rxtx_rings(struct netdev_private *np);
333 static void init_registers(struct net_device *dev);
334 static void tx_timeout(struct net_device *dev);
335 static int alloc_ringdesc(struct net_device *dev);
336 static void free_ringdesc(struct netdev_private *np);
337 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
338 static irqreturn_t intr_handler(int irq, void *dev_instance);
339 static void netdev_error(struct net_device *dev, int intr_status);
340 static int netdev_rx(struct net_device *dev);
341 static u32 __set_rx_mode(struct net_device *dev);
342 static void set_rx_mode(struct net_device *dev);
343 static struct net_device_stats *get_stats(struct net_device *dev);
344 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
345 static const struct ethtool_ops netdev_ethtool_ops;
346 static int netdev_close(struct net_device *dev);
348 static const struct net_device_ops netdev_ops = {
349 .ndo_open = netdev_open,
350 .ndo_stop = netdev_close,
351 .ndo_start_xmit = start_tx,
352 .ndo_get_stats = get_stats,
353 .ndo_set_rx_mode = set_rx_mode,
354 .ndo_do_ioctl = netdev_ioctl,
355 .ndo_tx_timeout = tx_timeout,
356 .ndo_change_mtu = eth_change_mtu,
357 .ndo_set_mac_address = eth_mac_addr,
358 .ndo_validate_addr = eth_validate_addr,
361 static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
363 struct net_device *dev;
364 struct netdev_private *np;
366 int chip_idx = ent->driver_data;
368 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
369 void __iomem *ioaddr;
371 i = pcim_enable_device(pdev);
374 pci_set_master(pdev);
378 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
379 pr_warn("Device %s disabled due to DMA limitations\n",
383 dev = alloc_etherdev(sizeof(*np));
386 SET_NETDEV_DEV(dev, &pdev->dev);
388 if (pci_request_regions(pdev, DRV_NAME))
391 ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
395 for (i = 0; i < 3; i++)
396 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
398 /* Reset the chip to erase previous misconfiguration.
399 No hold time required! */
400 iowrite32(0x00000001, ioaddr + PCIBusCfg);
402 np = netdev_priv(dev);
404 np->chip_id = chip_idx;
405 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
406 spin_lock_init(&np->lock);
407 np->mii_if.dev = dev;
408 np->mii_if.mdio_read = mdio_read;
409 np->mii_if.mdio_write = mdio_write;
410 np->base_addr = ioaddr;
412 pci_set_drvdata(pdev, dev);
415 option = dev->mem_start;
417 /* The lower four bits are the media type. */
420 np->mii_if.full_duplex = 1;
423 "ignoring user supplied media type %d",
426 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
427 np->mii_if.full_duplex = 1;
429 if (np->mii_if.full_duplex)
430 np->mii_if.force_media = 1;
432 /* The chip-specific entries in the device structure. */
433 dev->netdev_ops = &netdev_ops;
434 dev->ethtool_ops = &netdev_ethtool_ops;
435 dev->watchdog_timeo = TX_TIMEOUT;
437 i = register_netdev(dev);
439 goto err_out_cleardev;
441 dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
442 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
444 if (np->drv_flags & CanHaveMII) {
445 int phy, phy_idx = 0;
446 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
447 int mii_status = mdio_read(dev, phy, MII_BMSR);
448 if (mii_status != 0xffff && mii_status != 0x0000) {
449 np->phys[phy_idx++] = phy;
450 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
451 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
452 mdio_read(dev, phy, MII_PHYSID2);
454 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
455 np->mii, phy, mii_status,
456 np->mii_if.advertising);
459 np->mii_cnt = phy_idx;
460 np->mii_if.phy_id = np->phys[0];
463 "MII PHY not found -- this device may not operate correctly\n");
471 pci_iounmap(pdev, ioaddr);
478 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
479 often serial bit streams generated by the host processor.
480 The example below is for the common 93c46 EEPROM, 64 16 bit words. */
482 /* Delay between EEPROM clock transitions.
483 No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
484 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
485 made udelay() unreliable.
486 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is deprecated.
489 #define eeprom_delay(ee_addr) ioread32(ee_addr)
491 enum EEPROM_Ctrl_Bits {
492 EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
493 EE_ChipSelect=0x801, EE_DataIn=0x08,
496 /* The EEPROM commands include the always-set leading bit. */
498 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
501 static int eeprom_read(void __iomem *addr, int location)
505 void __iomem *ee_addr = addr + EECtrl;
506 int read_cmd = location | EE_ReadCmd;
507 iowrite32(EE_ChipSelect, ee_addr);
509 /* Shift the read command bits out. */
510 for (i = 10; i >= 0; i--) {
511 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
512 iowrite32(dataval, ee_addr);
513 eeprom_delay(ee_addr);
514 iowrite32(dataval | EE_ShiftClk, ee_addr);
515 eeprom_delay(ee_addr);
517 iowrite32(EE_ChipSelect, ee_addr);
518 eeprom_delay(ee_addr);
520 for (i = 16; i > 0; i--) {
521 iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
522 eeprom_delay(ee_addr);
523 retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
524 iowrite32(EE_ChipSelect, ee_addr);
525 eeprom_delay(ee_addr);
528 /* Terminate the EEPROM access. */
529 iowrite32(0, ee_addr);
533 /* MII transceiver control section.
534 Read and write the MII registers using software-generated serial
535 MDIO protocol. See the MII specifications or DP83840A data sheet
538 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
539 met by back-to-back 33 MHz PCI cycles. */
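/* One bit-banged MDIO bit, as in mdio_read()/mdio_write() below: present the
   data bit, then raise the clock; the ioread32() in mdio_delay() provides the
   required settling time:

	iowrite32(dataval, mdio_addr);
	mdio_delay(mdio_addr);
	iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
	mdio_delay(mdio_addr);
*/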
540 #define mdio_delay(mdio_addr) ioread32(mdio_addr)
542 /* Set iff a MII transceiver on any interface requires mdio preamble.
543 This is only set with older transceivers, so the extra
544 code size of a per-interface flag is not worthwhile. */
545 static char mii_preamble_required = 1;
547 #define MDIO_WRITE0 (MDIO_EnbOutput)
548 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
550 /* Generate the preamble required for initial synchronization and
551 a few older transceivers. */
552 static void mdio_sync(void __iomem *mdio_addr)
556 /* Establish sync by sending at least 32 logic ones. */
557 while (--bits >= 0) {
558 iowrite32(MDIO_WRITE1, mdio_addr);
559 mdio_delay(mdio_addr);
560 iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
561 mdio_delay(mdio_addr);
565 static int mdio_read(struct net_device *dev, int phy_id, int location)
567 struct netdev_private *np = netdev_priv(dev);
568 void __iomem *mdio_addr = np->base_addr + MIICtrl;
569 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
572 if (mii_preamble_required)
573 mdio_sync(mdio_addr);
575 /* Shift the read command bits out. */
576 for (i = 15; i >= 0; i--) {
577 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
579 iowrite32(dataval, mdio_addr);
580 mdio_delay(mdio_addr);
581 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
582 mdio_delay(mdio_addr);
584 /* Read the two transition bits, 16 data bits, and the wire-idle bits. */
585 for (i = 20; i > 0; i--) {
586 iowrite32(MDIO_EnbIn, mdio_addr);
587 mdio_delay(mdio_addr);
588 retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
589 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
590 mdio_delay(mdio_addr);
592 return (retval>>1) & 0xffff;
595 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
597 struct netdev_private *np = netdev_priv(dev);
598 void __iomem *mdio_addr = np->base_addr + MIICtrl;
599 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
602 if (location == 4 && phy_id == np->phys[0])
603 np->mii_if.advertising = value;
605 if (mii_preamble_required)
606 mdio_sync(mdio_addr);
608 /* Shift the command bits out. */
609 for (i = 31; i >= 0; i--) {
610 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
612 iowrite32(dataval, mdio_addr);
613 mdio_delay(mdio_addr);
614 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
615 mdio_delay(mdio_addr);
617 /* Clear out extra bits. */
618 for (i = 2; i > 0; i--) {
619 iowrite32(MDIO_EnbIn, mdio_addr);
620 mdio_delay(mdio_addr);
621 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
622 mdio_delay(mdio_addr);
627 static int netdev_open(struct net_device *dev)
629 struct netdev_private *np = netdev_priv(dev);
630 void __iomem *ioaddr = np->base_addr;
631 const int irq = np->pci_dev->irq;
634 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
636 netif_device_detach(dev);
637 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
642 netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
644 if((i=alloc_ringdesc(dev)))
647 spin_lock_irq(&np->lock);
648 netif_device_attach(dev);
650 spin_unlock_irq(&np->lock);
652 netif_start_queue(dev);
654 netdev_dbg(dev, "Done netdev_open()\n");
656 /* Set the timer to check for link beat. */
657 init_timer(&np->timer);
658 np->timer.expires = jiffies + 1*HZ;
659 np->timer.data = (unsigned long)dev;
660 np->timer.function = netdev_timer; /* timer handler */
661 add_timer(&np->timer);
664 netif_device_attach(dev);
668 #define MII_DAVICOM_DM9101 0x0181b800
670 static int update_link(struct net_device *dev)
672 struct netdev_private *np = netdev_priv(dev);
673 int duplex, fasteth, result, mii_reg;
676 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
678 if (mii_reg == 0xffff)
680 /* reread: the link status bit is sticky */
681 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
682 if (!(mii_reg & 0x4)) {
683 if (netif_carrier_ok(dev)) {
686 "MII #%d reports no link. Disabling watchdog\n",
688 netif_carrier_off(dev);
692 if (!netif_carrier_ok(dev)) {
695 "MII #%d link is back. Enabling watchdog\n",
697 netif_carrier_on(dev);
700 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
701 /* If the link partner doesn't support autonegotiation
702 * the MII detects its abilities with the "parallel detection".
703 * Some MIIs update the LPA register to the result of the parallel
704 * detection, some don't.
705 * The Davicom PHY [at least 0181b800] doesn't.
706 * Instead bits 9 and 13 of the BMCR are updated to the result
707 * of the negotiation.
709 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
710 duplex = mii_reg & BMCR_FULLDPLX;
711 fasteth = mii_reg & BMCR_SPEED100;
714 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
715 negotiated = mii_reg & np->mii_if.advertising;
717 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
718 fasteth = negotiated & 0x380;
720 duplex |= np->mii_if.force_media;
721 /* clear the fast ethernet and full duplex bits */
722 result = np->csr6 & ~0x20000200;
726 result |= 0x20000000;
727 if (result != np->csr6 && debug)
729 "Setting %dMBit-%s-duplex based on MII#%d\n",
730 fasteth ? 100 : 10, duplex ? "full" : "half",
735 #define RXTX_TIMEOUT 2000
736 static inline void update_csr6(struct net_device *dev, int new)
738 struct netdev_private *np = netdev_priv(dev);
739 void __iomem *ioaddr = np->base_addr;
740 int limit = RXTX_TIMEOUT;
742 if (!netif_device_present(dev))
746 /* stop both Tx and Rx processes */
747 iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
748 /* wait until they have really stopped */
750 int csr5 = ioread32(ioaddr + IntrStatus);
753 t = (csr5 >> 17) & 0x07;
756 t = (csr5 >> 20) & 0x07;
764 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
770 /* and restart them with the new configuration */
771 iowrite32(np->csr6, ioaddr + NetworkConfig);
773 np->mii_if.full_duplex = 1;
776 static void netdev_timer(unsigned long data)
778 struct net_device *dev = (struct net_device *)data;
779 struct netdev_private *np = netdev_priv(dev);
780 void __iomem *ioaddr = np->base_addr;
783 netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
784 ioread32(ioaddr + IntrStatus),
785 ioread32(ioaddr + NetworkConfig));
786 spin_lock_irq(&np->lock);
787 update_csr6(dev, update_link(dev));
788 spin_unlock_irq(&np->lock);
789 np->timer.expires = jiffies + 10*HZ;
790 add_timer(&np->timer);
793 static void init_rxtx_rings(struct net_device *dev)
795 struct netdev_private *np = netdev_priv(dev);
798 np->rx_head_desc = &np->rx_ring[0];
799 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
801 /* Initialize all Rx descriptors. */
802 for (i = 0; i < RX_RING_SIZE; i++) {
803 np->rx_ring[i].length = np->rx_buf_sz;
804 np->rx_ring[i].status = 0;
805 np->rx_skbuff[i] = NULL;
807 /* Mark the last entry as wrapping the ring. */
808 np->rx_ring[i-1].length |= DescEndRing;
810 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
811 for (i = 0; i < RX_RING_SIZE; i++) {
812 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
813 np->rx_skbuff[i] = skb;
816 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
817 np->rx_buf_sz,PCI_DMA_FROMDEVICE);
819 np->rx_ring[i].buffer1 = np->rx_addr[i];
820 np->rx_ring[i].status = DescOwned;
824 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
826 /* Initialize the Tx descriptors */
827 for (i = 0; i < TX_RING_SIZE; i++) {
828 np->tx_skbuff[i] = NULL;
829 np->tx_ring[i].status = 0;
832 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
834 iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
835 iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
836 np->base_addr + TxRingPtr);
840 static void free_rxtx_rings(struct netdev_private* np)
843 /* Free all the skbuffs in the Rx queue. */
844 for (i = 0; i < RX_RING_SIZE; i++) {
845 np->rx_ring[i].status = 0;
846 if (np->rx_skbuff[i]) {
847 pci_unmap_single(np->pci_dev,
849 np->rx_skbuff[i]->len,
851 dev_kfree_skb(np->rx_skbuff[i]);
853 np->rx_skbuff[i] = NULL;
855 for (i = 0; i < TX_RING_SIZE; i++) {
856 if (np->tx_skbuff[i]) {
857 pci_unmap_single(np->pci_dev,
859 np->tx_skbuff[i]->len,
861 dev_kfree_skb(np->tx_skbuff[i]);
863 np->tx_skbuff[i] = NULL;
867 static void init_registers(struct net_device *dev)
869 struct netdev_private *np = netdev_priv(dev);
870 void __iomem *ioaddr = np->base_addr;
873 for (i = 0; i < 6; i++)
874 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
876 /* Initialize other registers. */
878 i = (1<<20); /* Big-endian descriptors */
882 i |= (0x04<<2); /* skip length 4 u32 */
883 i |= 0x02; /* give Rx priority */
885 /* Configure the PCI bus bursts and FIFO thresholds.
886 486: Set 8 longword cache alignment, 8 longword burst.
887 586: Set 16 longword cache alignment, no burst limit.
888 Cache alignment bits 15:14 Burst length 13:8
889 0000 <not allowed> 0000 align to cache 0800 8 longwords
890 4000 8 longwords 0100 1 longword 1000 16 longwords
891 8000 16 longwords 0200 2 longwords 2000 32 longwords
892 C000 32 longwords 0400 4 longwords */
894 #if defined (__i386__) && !defined(MODULE)
895 /* When not a module we can work around broken '486 PCI boards. */
896 if (boot_cpu_data.x86 <= 4) {
899 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
903 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
905 #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
908 dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
911 iowrite32(i, ioaddr + PCIBusCfg);
914 /* 128 byte Tx threshold;
915 Transmit on; Receive on; */
916 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
918 /* Clear and Enable interrupts by setting the interrupt mask. */
919 iowrite32(0x1A0F5, ioaddr + IntrStatus);
920 iowrite32(0x1A0F5, ioaddr + IntrEnable);
922 iowrite32(0, ioaddr + RxStartDemand);
925 static void tx_timeout(struct net_device *dev)
927 struct netdev_private *np = netdev_priv(dev);
928 void __iomem *ioaddr = np->base_addr;
929 const int irq = np->pci_dev->irq;
931 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
932 ioread32(ioaddr + IntrStatus));
936 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
937 for (i = 0; i < RX_RING_SIZE; i++)
938 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
939 printk(KERN_CONT "\n");
940 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
941 for (i = 0; i < TX_RING_SIZE; i++)
942 printk(KERN_CONT " %08x", np->tx_ring[i].status);
943 printk(KERN_CONT "\n");
945 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
946 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
947 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
950 spin_lock_irq(&np->lock);
952 * Under high load dirty_tx and the internal tx descriptor pointer
953 * come out of sync, thus perform a software reset and reinitialize
957 iowrite32(1, np->base_addr+PCIBusCfg);
961 init_rxtx_rings(dev);
963 spin_unlock_irq(&np->lock);
966 netif_wake_queue(dev);
967 netif_trans_update(dev); /* prevent tx timeout */
968 np->stats.tx_errors++;
971 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
972 static int alloc_ringdesc(struct net_device *dev)
974 struct netdev_private *np = netdev_priv(dev);
976 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
978 np->rx_ring = pci_alloc_consistent(np->pci_dev,
979 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
980 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
984 init_rxtx_rings(dev);
988 static void free_ringdesc(struct netdev_private *np)
990 pci_free_consistent(np->pci_dev,
991 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
992 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
993 np->rx_ring, np->ring_dma_addr);
997 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
999 struct netdev_private *np = netdev_priv(dev);
1002 /* Caution: the write order is important here; set the field
1003 with the "ownership" bits last. */
1005 /* Calculate the next Tx descriptor entry. */
1006 entry = np->cur_tx % TX_RING_SIZE;
1008 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1009 skb->data,skb->len, PCI_DMA_TODEVICE);
1010 np->tx_skbuff[entry] = skb;
1012 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1013 if (skb->len < TX_BUFLIMIT) {
1014 np->tx_ring[entry].length = DescWholePkt | skb->len;
1016 int len = skb->len - TX_BUFLIMIT;
1018 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1019 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1021 if(entry == TX_RING_SIZE-1)
1022 np->tx_ring[entry].length |= DescEndRing;
1024 /* Now acquire the irq spinlock.
1025 * The difficult race is the ordering between
1026 * increasing np->cur_tx and setting DescOwned:
1027 * - if np->cur_tx is increased first the interrupt
1028 * handler could consider the packet as transmitted
1029 * since DescOwned is cleared.
1030 * - If DescOwned is set first the NIC could report the
1031 * packet as sent, but the interrupt handler would ignore it
1032 * since the np->cur_tx was not yet increased.
1034 spin_lock_irq(&np->lock);
1037 wmb(); /* flush length, buffer1, buffer2 */
1038 np->tx_ring[entry].status = DescOwned;
1039 wmb(); /* flush status and kick the hardware */
1040 iowrite32(0, np->base_addr + TxStartDemand);
1041 np->tx_q_bytes += skb->len;
1042 /* Work around horrible bug in the chip by marking the queue as full
1043 when we do not have FIFO room for a maximum sized packet. */
1044 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1045 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1046 netif_stop_queue(dev);
1050 spin_unlock_irq(&np->lock);
1053 netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1056 return NETDEV_TX_OK;
1059 static void netdev_tx_done(struct net_device *dev)
1061 struct netdev_private *np = netdev_priv(dev);
1062 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1063 int entry = np->dirty_tx % TX_RING_SIZE;
1064 int tx_status = np->tx_ring[entry].status;
1068 if (tx_status & 0x8000) { /* There was an error, log it. */
1069 #ifndef final_version
1071 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1074 np->stats.tx_errors++;
1075 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1076 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1077 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1078 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1079 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1080 np->stats.tx_heartbeat_errors++;
1082 #ifndef final_version
1084 netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
1087 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1088 np->stats.collisions += (tx_status >> 3) & 15;
1089 np->stats.tx_packets++;
1091 /* Free the original skb. */
1092 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1093 np->tx_skbuff[entry]->len,
1095 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1096 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1097 np->tx_skbuff[entry] = NULL;
1100 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1101 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1102 /* The ring is no longer full, clear tbusy. */
1105 netif_wake_queue(dev);
1109 /* The interrupt handler does all of the Rx thread work and cleans up
1110 after the Tx thread. */
1111 static irqreturn_t intr_handler(int irq, void *dev_instance)
1113 struct net_device *dev = (struct net_device *)dev_instance;
1114 struct netdev_private *np = netdev_priv(dev);
1115 void __iomem *ioaddr = np->base_addr;
1116 int work_limit = max_interrupt_work;
1119 if (!netif_device_present(dev))
1122 u32 intr_status = ioread32(ioaddr + IntrStatus);
1124 /* Acknowledge all of the current interrupt sources ASAP. */
1125 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1128 netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);
1130 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1135 if (intr_status & (RxIntr | RxNoBuf))
1137 if (intr_status & RxNoBuf)
1138 iowrite32(0, ioaddr + RxStartDemand);
1140 if (intr_status & (TxNoBuf | TxIntr) &&
1141 np->cur_tx != np->dirty_tx) {
1142 spin_lock(&np->lock);
1143 netdev_tx_done(dev);
1144 spin_unlock(&np->lock);
1147 /* Abnormal error summary/uncommon events handlers. */
1148 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
1150 netdev_error(dev, intr_status);
1152 if (--work_limit < 0) {
1154 "Too much work at interrupt, status=0x%04x\n",
1156 /* Set the timer to re-enable the other interrupts after
1158 spin_lock(&np->lock);
1159 if (netif_device_present(dev)) {
1160 iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1161 iowrite32(10, ioaddr + GPTimer);
1163 spin_unlock(&np->lock);
1169 netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
1170 ioread32(ioaddr + IntrStatus));
1171 return IRQ_RETVAL(handled);
1174 /* This routine is logically part of the interrupt handler, but separated
1175 for clarity and better register allocation. */
1176 static int netdev_rx(struct net_device *dev)
1178 struct netdev_private *np = netdev_priv(dev);
1179 int entry = np->cur_rx % RX_RING_SIZE;
1180 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1183 netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
1184 entry, np->rx_ring[entry].status);
1187 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1188 while (--work_limit >= 0) {
1189 struct w840_rx_desc *desc = np->rx_head_desc;
1190 s32 status = desc->status;
1193 netdev_dbg(dev, " netdev_rx() status was %08x\n",
1197 if ((status & 0x38008300) != 0x0300) {
1198 if ((status & 0x38000300) != 0x0300) {
1199 /* Ignore earlier buffers. */
1200 if ((status & 0xffff) != 0x7fff) {
1202 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
1203 np->cur_rx, status);
1204 np->stats.rx_length_errors++;
1206 } else if (status & 0x8000) {
1207 /* There was a fatal error. */
1209 netdev_dbg(dev, "Receive error, Rx status %08x\n",
1211 np->stats.rx_errors++; /* end of a packet.*/
1212 if (status & 0x0890) np->stats.rx_length_errors++;
1213 if (status & 0x004C) np->stats.rx_frame_errors++;
1214 if (status & 0x0002) np->stats.rx_crc_errors++;
1217 struct sk_buff *skb;
1218 /* Omit the four octet CRC from the length. */
1219 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1221 #ifndef final_version
1223 netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
1226 /* Check if the packet is long enough to accept without copying
1227 to a minimally-sized skbuff. */
1228 if (pkt_len < rx_copybreak &&
1229 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1230 skb_reserve(skb, 2); /* 16 byte align the IP header */
1231 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1232 np->rx_skbuff[entry]->len,
1233 PCI_DMA_FROMDEVICE);
1234 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1235 skb_put(skb, pkt_len);
1236 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1237 np->rx_skbuff[entry]->len,
1238 PCI_DMA_FROMDEVICE);
1240 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1241 np->rx_skbuff[entry]->len,
1242 PCI_DMA_FROMDEVICE);
1243 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1244 np->rx_skbuff[entry] = NULL;
1246 #ifndef final_version /* Remove after testing. */
1247 /* You will want this info for the initial debug. */
1249 netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
1250 &skb->data[0], &skb->data[6],
1251 skb->data[12], skb->data[13],
1254 skb->protocol = eth_type_trans(skb, dev);
1256 np->stats.rx_packets++;
1257 np->stats.rx_bytes += pkt_len;
1259 entry = (++np->cur_rx) % RX_RING_SIZE;
1260 np->rx_head_desc = &np->rx_ring[entry];
1263 /* Refill the Rx ring buffers. */
1264 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1265 struct sk_buff *skb;
1266 entry = np->dirty_rx % RX_RING_SIZE;
1267 if (np->rx_skbuff[entry] == NULL) {
1268 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1269 np->rx_skbuff[entry] = skb;
1271 break; /* Better luck next round. */
1272 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1274 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1275 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1278 np->rx_ring[entry].status = DescOwned;
1284 static void netdev_error(struct net_device *dev, int intr_status)
1286 struct netdev_private *np = netdev_priv(dev);
1287 void __iomem *ioaddr = np->base_addr;
1290 netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
1291 if (intr_status == 0xffffffff)
1293 spin_lock(&np->lock);
1294 if (intr_status & TxFIFOUnderflow) {
1296 /* Bump up the Tx threshold */
1298 /* This causes lots of dropped packets,
1299 * and under high load even tx_timeouts
1301 new = np->csr6 + 0x4000;
1303 new = (np->csr6 >> 14)&0x7f;
1307 new = 127; /* load full packet before starting */
1308 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1310 netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
1311 update_csr6(dev, new);
1313 if (intr_status & RxDied) { /* Missed a Rx frame. */
1314 np->stats.rx_errors++;
1316 if (intr_status & TimerInt) {
1317 /* Re-enable other interrupts. */
1318 if (netif_device_present(dev))
1319 iowrite32(0x1A0F5, ioaddr + IntrEnable);
1321 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1322 iowrite32(0, ioaddr + RxStartDemand);
1323 spin_unlock(&np->lock);
1326 static struct net_device_stats *get_stats(struct net_device *dev)
1328 struct netdev_private *np = netdev_priv(dev);
1329 void __iomem *ioaddr = np->base_addr;
1331 /* The chip only needs to report frames it silently dropped. */
1332 spin_lock_irq(&np->lock);
1333 if (netif_running(dev) && netif_device_present(dev))
1334 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1335 spin_unlock_irq(&np->lock);
1341 static u32 __set_rx_mode(struct net_device *dev)
1343 struct netdev_private *np = netdev_priv(dev);
1344 void __iomem *ioaddr = np->base_addr;
1345 u32 mc_filter[2]; /* Multicast hash filter */
1348 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1349 memset(mc_filter, 0xff, sizeof(mc_filter));
1350 rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1352 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1353 (dev->flags & IFF_ALLMULTI)) {
1354 /* Too many to match, or accept all multicasts. */
1355 memset(mc_filter, 0xff, sizeof(mc_filter));
1356 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1358 struct netdev_hw_addr *ha;
1360 memset(mc_filter, 0, sizeof(mc_filter));
1361 netdev_for_each_mc_addr(ha, dev) {
1364 filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1366 mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1368 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1370 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1371 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1375 static void set_rx_mode(struct net_device *dev)
1377 struct netdev_private *np = netdev_priv(dev);
1378 u32 rx_mode = __set_rx_mode(dev);
1379 spin_lock_irq(&np->lock);
1380 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1381 spin_unlock_irq(&np->lock);
1384 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1386 struct netdev_private *np = netdev_priv(dev);
1388 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1389 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1390 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1393 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1395 struct netdev_private *np = netdev_priv(dev);
1398 spin_lock_irq(&np->lock);
1399 rc = mii_ethtool_gset(&np->mii_if, cmd);
1400 spin_unlock_irq(&np->lock);
1405 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1407 struct netdev_private *np = netdev_priv(dev);
1410 spin_lock_irq(&np->lock);
1411 rc = mii_ethtool_sset(&np->mii_if, cmd);
1412 spin_unlock_irq(&np->lock);
1417 static int netdev_nway_reset(struct net_device *dev)
1419 struct netdev_private *np = netdev_priv(dev);
1420 return mii_nway_restart(&np->mii_if);
1423 static u32 netdev_get_link(struct net_device *dev)
1425 struct netdev_private *np = netdev_priv(dev);
1426 return mii_link_ok(&np->mii_if);
1429 static u32 netdev_get_msglevel(struct net_device *dev)
1434 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1439 static const struct ethtool_ops netdev_ethtool_ops = {
1440 .get_drvinfo = netdev_get_drvinfo,
1441 .get_settings = netdev_get_settings,
1442 .set_settings = netdev_set_settings,
1443 .nway_reset = netdev_nway_reset,
1444 .get_link = netdev_get_link,
1445 .get_msglevel = netdev_get_msglevel,
1446 .set_msglevel = netdev_set_msglevel,
1449 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1451 struct mii_ioctl_data *data = if_mii(rq);
1452 struct netdev_private *np = netdev_priv(dev);
1455 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1456 data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
1459 case SIOCGMIIREG: /* Read MII PHY register. */
1460 spin_lock_irq(&np->lock);
1461 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1462 spin_unlock_irq(&np->lock);
1465 case SIOCSMIIREG: /* Write MII PHY register. */
1466 spin_lock_irq(&np->lock);
1467 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1468 spin_unlock_irq(&np->lock);
1475 static int netdev_close(struct net_device *dev)
1477 struct netdev_private *np = netdev_priv(dev);
1478 void __iomem *ioaddr = np->base_addr;
1480 netif_stop_queue(dev);
1483 netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
1484 ioread32(ioaddr + IntrStatus),
1485 ioread32(ioaddr + NetworkConfig));
1486 netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
1487 np->cur_tx, np->dirty_tx,
1488 np->cur_rx, np->dirty_rx);
1491 /* Stop the chip's Tx and Rx processes. */
1492 spin_lock_irq(&np->lock);
1493 netif_device_detach(dev);
1494 update_csr6(dev, 0);
1495 iowrite32(0x0000, ioaddr + IntrEnable);
1496 spin_unlock_irq(&np->lock);
1498 free_irq(np->pci_dev->irq, dev);
1500 netif_device_attach(dev);
1502 if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
1503 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1509 printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
1510 for (i = 0; i < TX_RING_SIZE; i++)
1511 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1512 i, np->tx_ring[i].length,
1513 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1514 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1515 for (i = 0; i < RX_RING_SIZE; i++) {
1516 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1517 i, np->rx_ring[i].length,
1518 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1521 #endif /* __i386__ debugging only */
1523 del_timer_sync(&np->timer);
1525 free_rxtx_rings(np);
1531 static void w840_remove1(struct pci_dev *pdev)
1533 struct net_device *dev = pci_get_drvdata(pdev);
1536 struct netdev_private *np = netdev_priv(dev);
1537 unregister_netdev(dev);
1538 pci_iounmap(pdev, np->base_addr);
1546 * suspend/resume synchronization:
1547 * - open, close, do_ioctl:
1548 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1550 * spin_lock_irq(np->lock), doesn't touch hw if not present
1552 * synchronize_irq + netif_tx_disable;
1554 * netif_device_detach + netif_tx_disable;
1555 * - set_multicast_list
1556 * netif_device_detach + netif_tx_disable;
1557 * - interrupt handler
1558 * doesn't touch hw if not present, synchronize_irq waits for
1559 * running instances of the interrupt handler.
1561 * Disabling hw requires clearing csr6 & IntrEnable.
1562 * update_csr6() and all functions that write IntrEnable check
1563 * netif_device_present() before setting any bits.
1565 * Detach must occur while holding spin_lock_irq(); interrupts from a detached
1566 * device would cause an irq storm.
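 *
 * Sketch of the shutdown sequence this implies (it is what netdev_close()
 * and w840_suspend() below actually do):
 *
 *	spin_lock_irq(&np->lock);
 *	netif_device_detach(dev);
 *	update_csr6(dev, 0);
 *	iowrite32(0, ioaddr + IntrEnable);
 *	spin_unlock_irq(&np->lock);
 *	synchronize_irq(np->pci_dev->irq);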
1568 static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1570 struct net_device *dev = pci_get_drvdata (pdev);
1571 struct netdev_private *np = netdev_priv(dev);
1572 void __iomem *ioaddr = np->base_addr;
1575 if (netif_running (dev)) {
1576 del_timer_sync(&np->timer);
1578 spin_lock_irq(&np->lock);
1579 netif_device_detach(dev);
1580 update_csr6(dev, 0);
1581 iowrite32(0, ioaddr + IntrEnable);
1582 spin_unlock_irq(&np->lock);
1584 synchronize_irq(np->pci_dev->irq);
1585 netif_tx_disable(dev);
1587 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1589 /* no more hardware accesses behind this line. */
1591 BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1593 /* pci_power_off(pdev, -1); */
1595 free_rxtx_rings(np);
1597 netif_device_detach(dev);
1603 static int w840_resume (struct pci_dev *pdev)
1605 struct net_device *dev = pci_get_drvdata (pdev);
1606 struct netdev_private *np = netdev_priv(dev);
1610 if (netif_device_present(dev))
1611 goto out; /* device not suspended */
1612 if (netif_running(dev)) {
1613 if ((retval = pci_enable_device(pdev))) {
1615 "pci_enable_device failed in resume\n");
1618 spin_lock_irq(&np->lock);
1619 iowrite32(1, np->base_addr+PCIBusCfg);
1620 ioread32(np->base_addr+PCIBusCfg);
1622 netif_device_attach(dev);
1623 init_rxtx_rings(dev);
1624 init_registers(dev);
1625 spin_unlock_irq(&np->lock);
1627 netif_wake_queue(dev);
1629 mod_timer(&np->timer, jiffies + 1*HZ);
1631 netif_device_attach(dev);
1639 static struct pci_driver w840_driver = {
1641 .id_table = w840_pci_tbl,
1642 .probe = w840_probe1,
1643 .remove = w840_remove1,
1645 .suspend = w840_suspend,
1646 .resume = w840_resume,
1650 static int __init w840_init(void)
1653 return pci_register_driver(&w840_driver);
1656 static void __exit w840_exit(void)
1658 pci_unregister_driver(&w840_driver);
1661 module_init(w840_init);
1662 module_exit(w840_exit);