/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
	Written 1998-2000 by Donald Becker.

	Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
	send all bug reports to me, and not to Donald Becker, as this code
	has been heavily modified from Donald's original version.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The information below comes from Donald Becker's original driver:

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210

	Support and updates available at
	http://www.scyld.com/network/starfire.html
	[link no longer provides useful info -jgarzik]
*/
#define DRV_NAME	"starfire"

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/firmware.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <linux/uaccess.h>
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE
/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif
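/*
 * Worked example: with PADDING_MASK == 3, a length is rounded up to the
 * next 32-bit boundary by (len + PADDING_MASK) & ~PADDING_MASK, so
 * 61 -> 64 and 64 -> 64. start_tx() below feeds exactly this expression
 * to skb_padto() for checksummed frames.
 */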
/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define VLAN_SUPPORT
#endif
/* The user-configurable values.
   These may be modified when a driver module is loaded. */

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;

#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer. */
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow for unaligned loads even of integers being
 * misaligned on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty.
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif
/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the
   others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
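/*
 * Note: the burst size appears to be programmed in 32-byte units;
 * netdev_open() below writes DMA_BURST_SIZE / 32 (2 for 64 bytes, 4 for
 * 128 bytes) into both RxDMACtrl and TxDescCtrl.
 */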
/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE	256
#define TX_RING_SIZE	32
/* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB. */
#define DONE_Q_SIZE	1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN	256
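/*
 * Worked example: netdev_open() rounds each queue size up to QUEUE_ALIGN
 * with ((size + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN,
 * e.g. 1000 -> 1024 while an already-aligned 1024 stays 1024.
 */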
#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2 * HZ)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* 64-bit dma_addr_t */
#define ADDR_64BITS	/* This chip uses 64 bit addresses. */
#define netdrv_addr_t	__le64
#define cpu_to_dma(x)	cpu_to_le64(x)
#define dma_to_cpu(x)	le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t	__le32
#define cpu_to_dma(x)	cpu_to_le32(x)
#define dma_to_cpu(x)	le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif /* 32-bit dma_addr_t */
#define skb_first_frag_len(skb)	skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
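/*
 * Note: for a linear skb, skb_shinfo(skb)->nr_frags is 0, so
 * skb_num_frags() is 1 and skb_first_frag_len() covers the whole frame;
 * a scatter-gather skb adds one descriptor per page fragment on top of
 * the header portion.
 */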
#define FIRMWARE_RX	"/*(DEBLOBBED)*/"
#define FIRMWARE_TX	"/*(DEBLOBBED)*/"

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
/*
			Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
ring sizes are fixed by the hardware, but a descriptor may optionally be
marked to wrap the ring earlier via the END bit.
This driver uses the full hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue. When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
in the interrupt handler.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure. There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding. It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors. The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor. The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than rx_copybreak bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff. When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)
*/
enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
	CH_6915 = 0,
};

static const struct pci_device_id starfire_pci_tbl[] = {
	{ PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
	const char *name;
	int drv_flags;
} netdrv_tbl[] = {
	{ "Adaptec Starfire 6915", CanHaveMII },
};
/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
	TxRingHiAddr=0x5009C, /* 64 bit address extension. */
	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
	TxThreshold=0x500B0,
	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
	TxMode=0x55000, VlanType=0x55064,
	PerfFilterTable=0x56000, HashTable=0x56100,
	TxGfpMem=0x58000, RxGfpMem=0x5a000,
};
/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
	IntrTxGfp=0x02, IntrPCIPad=0x01,
	/* not quite bits */
	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};
/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
};
/* Bits in the TxMode register */
enum tx_mode_bits {
	MiiSoftReset=0x8000, MIILoopback=0x4000,
	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};
/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
	TxDescSpace128=0x30, TxDescSpace256=0x40,
	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
	TxDescType3=0x03, TxDescType4=0x04,
	TxNoDMACompletion=0x08,
	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
	TxDMABurstSizeShift=8,
};
/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
	RxBufferLenShift=16, RxMinDescrThreshShift=0,
	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
	Rx2048QEntries=0x4000, Rx256QEntries=0,
	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
	RxDescSpace4=0x000, RxDescSpace8=0x100,
	RxDescSpace16=0x200, RxDescSpace32=0x300,
	RxDescSpace64=0x400, RxDescSpace128=0x500,
};
/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
	RxChecksumRejectTCPOnly=0x01000000,
	RxCompletionQ2Enable=0x800000,
	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
	RxDMAQ2NonIP=0x400000,
	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
	RxBurstSizeShift=0,
};
/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
	RxComplProducerWrEn=0x40,
	RxComplType0=0x00, RxComplType1=0x10,
	RxComplType2=0x20, RxComplType3=0x30,
	RxComplThreshShift=0,
};
/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
	TxComplProducerWrEn=0x40,
	TxComplIntrStatus=0x20,
	CommonQueueMode=0x10,
	TxComplThreshShift=0,
};
/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
	RxEnable=0x05, TxEnable=0x0a,
	RxGFPEnable=0x10, TxGFPEnable=0x20,
};
/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
	IntrLatencyMask=0x1f,
};
/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
	netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
	RxDescValid=1, RxDescEndRing=2,
};
/* Completion queue entry. */
struct short_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
};
struct basic_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 vlanid;
	__le16 status2;
};
struct csum_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 csum;			/* Partial checksum */
	__le16 status2;
};
struct full_rx_done_desc {
	__le32 status;			/* Low 16 bits is length. */
	__le16 status3;
	__le16 status2;
	__le16 vlanid;
	__le16 csum;			/* partial checksum */
	__le32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */
enum rx_done_bits {
	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};
/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
	__le32 status;			/* Upper bits are status, lower 16 length. */
	__le32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
	__le32 status;			/* Upper bits are status, lower 16 length. */
	__le32 reserved;
	__le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim
enum tx_desc_bits {
	TxDescID=0xB0000000,
	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
	__le32 status;			/* timestamp, index. */
#if 0
	__le32 intrstatus;		/* interrupt status */
#endif
};
struct rx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
struct tx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int used_slots;
};
#define PHY_CNT		2
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct starfire_rx_desc *rx_ring;
	starfire_tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of rx/tx-in-place skbuffs. */
	struct rx_ring_info rx_info[RX_RING_SIZE];
	struct tx_ring_info tx_info[TX_RING_SIZE];
	/* Pointers to completion queues (full pages). */
	rx_done_desc *rx_done_q;
	dma_addr_t rx_done_q_dma;
	unsigned int rx_done;
	struct tx_done_desc *tx_done_q;
	dma_addr_t tx_done_q_dma;
	unsigned int tx_done;
	struct napi_struct napi;
	struct net_device *dev;
	struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
	void *queue_mem;
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;

	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx, reap_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	/* These values keep track of the transceiver/media in use. */
	int speed100;			/* Set if speed == 100MBit. */
	u32 tx_mode;
	u32 intr_timer_ctrl;
	u8 tx_threshold;
	/* MII transceiver section. */
	struct mii_if_info mii_if;	/* MII lib hooks/info */
	int phy_cnt;			/* MII device addresses. */
	unsigned char phys[PHY_CNT];	/* MII device addresses. */
	void __iomem *base;
};
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int __netdev_rx(struct net_device *dev, int *quota);
static int netdev_poll(struct napi_struct *napi, int budget);
static void refill_rx_ring(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static void netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
static int netdev_vlan_rx_add_vid(struct net_device *dev,
				  __be16 proto, u16 vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_bit(vid, np->active_vlans);
	set_rx_mode(dev);
	spin_unlock(&np->lock);

	return 0;
}

static int netdev_vlan_rx_kill_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	clear_bit(vid, np->active_vlans);
	set_rx_mode(dev);
	spin_unlock(&np->lock);

	return 0;
}
#endif /* VLAN_SUPPORT */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef VLAN_SUPPORT
	.ndo_vlan_rx_add_vid	= netdev_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netdev_vlan_rx_kill_vid,
#endif
};
static int starfire_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	struct device *d = &pdev->dev;
	struct netdev_private *np;
	int i, irq, chip_idx = ent->driver_data;
	struct net_device *dev;
	void __iomem *base;
	long ioaddr;
	int drv_flags, io_size;
	int boguscnt;

	if (pci_enable_device (pdev))
		return -EIO;

	ioaddr = pci_resource_start(pdev, 0);
	io_size = pci_resource_len(pdev, 0);
	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
		dev_err(d, "no PCI MEM resources, aborting\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	irq = pdev->irq;

	if (pci_request_regions (pdev, DRV_NAME)) {
		dev_err(d, "cannot reserve PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	base = ioremap(ioaddr, io_size);
	if (!base) {
		dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
			io_size, ioaddr);
		goto err_out_free_res;
	}

	pci_set_master(pdev);

	/* enable MWI -- it vastly improves Rx performance on sparc64 */
	pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
	/* Starfire can do TCP/UDP checksumming */
	if (enable_hw_cksum)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */

#ifdef VLAN_SUPPORT
	dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* VLAN_SUPPORT */

#ifdef ADDR_64BITS
	dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

	/* Serial EEPROM reads are hidden by the hardware. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
	if (debug > 4)
		for (i = 0; i < 0x20; i++)
			printk("%2.2x%s",
			       (unsigned int)readb(base + EEPROMCtrl + i),
			       i % 16 != 15 ? " " : "\n");
#endif

	/* Issue soft reset */
	writel(MiiSoftReset, base + TxMode);
	udelay(1000);
	writel(0, base + TxMode);

	/* Reset the chip to erase previous misconfiguration. */
	writel(1, base + PCIDeviceConfig);
	boguscnt = 1000;
	while (--boguscnt > 0) {
		udelay(10);
		if ((readl(base + PCIDeviceConfig) & 1) == 0)
			break;
	}
	if (boguscnt == 0)
		printk("%s: chipset reset never completed!\n", dev->name);
	/* wait a little longer */
	udelay(1000);

	np = netdev_priv(dev);
	np->dev = dev;
	np->base = base;
	spin_lock_init(&np->lock);
	pci_set_drvdata(pdev, dev);

	np->pci_dev = pdev;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	drv_flags = netdrv_tbl[chip_idx].drv_flags;

	np->speed100 = 1;
	/* timer resolution is 128 * 0.8us */
	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
		Timer10X | EnableIntrMasking;
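	/*
	 * Worked example: one timer tick is 128 * 0.8us = 102.4us, and
	 * (intr_latency * 10) / 1024 is integer division by 102.4us, so
	 * intr_latency = 500 (microseconds) programs 4 ticks, i.e. roughly
	 * 410us of actual latency.
	 */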
	if (small_frames > 0) {
		np->intr_timer_ctrl |= SmallFrameBypass;
		switch (small_frames) {
		case 1 ... 64:
			np->intr_timer_ctrl |= SmallFrame64;
			break;
		case 65 ... 128:
			np->intr_timer_ctrl |= SmallFrame128;
			break;
		case 129 ... 256:
			np->intr_timer_ctrl |= SmallFrame256;
			break;
		default:
			np->intr_timer_ctrl |= SmallFrame512;
			if (small_frames > 512)
				printk("Adjusting small_frames down to 512\n");
			break;
		}
	}

	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;

	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);

	if (mtu)
		dev->mtu = mtu;

	if (register_netdev(dev))
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, netdrv_tbl[chip_idx].name, base,
	       dev->dev_addr, irq);

	if (drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		int mii_status;
		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
			msleep(100);
			boguscnt = 1000;
			while (--boguscnt > 0)
				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
					break;
			if (boguscnt == 0) {
				printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
				continue;
			}
			mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
				       "%#4.4x advertising %#4.4x.\n",
				       dev->name, phy, mii_status, np->mii_if.advertising);
				/* there can be only one PHY on-board */
				break;
			}
		}
		np->phy_cnt = phy_idx;
		if (np->phy_cnt > 0)
			np->mii_if.phy_id = np->phys[0];
		else
			memset(&np->mii_if, 0, sizeof(np->mii_if));
	}

	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
	       dev->name, enable_hw_cksum ? "enabled" : "disabled");
	return 0;

err_out_cleardev:
	iounmap(base);
err_out_free_res:
	pci_release_regions (pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}
/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
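	/*
	 * Note: each PHY gets a 128-byte register window (phy_id << 7) and
	 * each MII register a 32-bit slot within it (location << 2).
	 */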
	int result, boguscnt=1000;
	/* ??? Should we add a busy-wait here? */
	do {
		result = readl(mdio_addr);
	} while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
	if (boguscnt == 0)
		return 0;
	if ((result & 0xffff) == 0xffff)
		return 0;
	return result & 0xffff;
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
	writel(value, mdio_addr);
	/* The busy-wait will occur before a read. */
}
static int netdev_open(struct net_device *dev)
{
	const struct firmware *fw_rx, *fw_tx;
	const __be32 *fw_rx_data, *fw_tx_data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	const int irq = np->pci_dev->irq;
	int i, retval;
	size_t tx_size, rx_size;
	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

	/* Do we ever need to reset the chip??? */

	retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (retval)
		return retval;

	/* Disable the Rx and Tx, and reset the chip. */
	writel(0, ioaddr + GenCtrl);
	writel(1, ioaddr + PCIDeviceConfig);
	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
		       dev->name, irq);

	/* Allocate the various queues. */
	if (!np->queue_mem) {
		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
		np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
						   np->queue_mem_size,
						   &np->queue_mem_dma, GFP_ATOMIC);
		if (np->queue_mem == NULL) {
			free_irq(irq, dev);
			return -ENOMEM;
		}

		np->tx_done_q = np->queue_mem;
		np->tx_done_q_dma = np->queue_mem_dma;
		np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
		np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
		np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
		np->rx_ring = (void *) np->tx_ring + tx_ring_size;
		np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
	}
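	/*
	 * Layout sketch of the single coherent allocation above (each size
	 * already rounded up to QUEUE_ALIGN):
	 *
	 *   queue_mem: [ tx_done_q | rx_done_q | tx_ring | rx_ring ]
	 *
	 * CPU pointers and DMA addresses advance by the same offsets, so one
	 * dma_alloc_coherent() call backs all four queues.
	 */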
	/* Start with no carrier, it gets adjusted later */
	netif_carrier_off(dev);
	init_ring(dev);
	/* Set the size of the Rx buffers. */
	writel((np->rx_buf_sz << RxBufferLenShift) |
	       (0 << RxMinDescrThreshShift) |
	       RxPrefetchMode | RxVariableQ |
	       RX_Q_ENTRIES |
	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
	       RxDescSpace4,
	       ioaddr + RxDescQCtrl);

	/* Set up the Rx DMA controller. */
	writel(RxChecksumIgnore |
	       (0 << RxEarlyIntThreshShift) |
	       (6 << RxHighPrioThreshShift) |
	       ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
	       ioaddr + RxDMACtrl);

	/* Set Tx descriptor */
	writel((2 << TxHiPriFIFOThreshShift) |
	       (0 << TxPadLenShift) |
	       ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
	       TX_DESC_Q_ADDR_SIZE |
	       TX_DESC_SPACING | TX_DESC_TYPE,
	       ioaddr + TxDescCtrl);

	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma |
	       RxComplType |
	       (0 << RxComplThreshShift),
	       ioaddr + RxCompletionAddr);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

	/* Fill both the Tx SA register and the Rx perfect filter. */
	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
	/* The first entry is special because it bypasses the VLAN filter. */
	writew(0, ioaddr + PerfFilterTable);
	writew(0, ioaddr + PerfFilterTable + 4);
	writew(0, ioaddr + PerfFilterTable + 8);
	for (i = 1; i < 16; i++) {
		__be16 *eaddrs = (__be16 *)dev->dev_addr;
		void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
		writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
	}
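	/*
	 * Note: each perfect-filter entry is 16 bytes wide with 16-bit
	 * register slots on 4-byte strides; the MAC is written eaddrs[2],
	 * eaddrs[1], eaddrs[0], apparently matching the byte-reversed
	 * TxStationAddr write above.
	 */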
	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;	/* modified when link is up. */
	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
	udelay(1000);
	writel(np->tx_mode, ioaddr + TxMode);
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);

	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

	napi_enable(&np->napi);

	netif_start_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
	set_rx_mode(dev);

	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
	check_duplex(dev);

	/* Enable GPIO interrupts on link change */
	writel(0x0f00ff00, ioaddr + GPIOCtrl);

	/* Set the interrupt mask */
	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
	       IntrTxDMADone | IntrStatsMax | IntrLinkChange |
	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
	       ioaddr + IntrEnable);
	/* Enable PCI interrupts. */
	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
	       ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
	/* Set VLAN type to 802.1q */
	writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */
	retval = reject_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_RX);
		goto out_init;
	}
	if (fw_rx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_rx->size, FIRMWARE_RX);
		retval = -EINVAL;
		goto out_rx;
	}
	retval = reject_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_TX);
		goto out_rx;
	}
	if (fw_tx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_tx->size, FIRMWARE_TX);
		retval = -EINVAL;
		goto out_tx;
	}
	fw_rx_data = (const __be32 *)&fw_rx->data[0];
	fw_tx_data = (const __be32 *)&fw_tx->data[0];
	rx_size = fw_rx->size / 4;
	tx_size = fw_tx->size / 4;

	/* Load Rx/Tx firmware into the frame processors */
	for (i = 0; i < rx_size; i++)
		writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
	for (i = 0; i < tx_size; i++)
		writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
	if (enable_hw_cksum)
		/* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
		writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
	else
		/* Enable the Rx and Tx units only. */
		writel(TxEnable|RxEnable, ioaddr + GenCtrl);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Done netdev_open().\n",
		       dev->name);

out_tx:
	release_firmware(fw_tx);
out_rx:
	release_firmware(fw_rx);
out_init:
	if (retval)
		netdev_close(dev);
	return retval;
}
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 reg0;
	int silly_count = 1000;

	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
	udelay(500);
	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
		/* do nothing */;
	if (!silly_count) {
		printk("%s: MII reset failed!\n", dev->name);
		return;
	}

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

	if (!np->mii_if.force_media) {
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (np->speed100)
			reg0 |= BMCR_SPEED100;
		if (np->mii_if.full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");
	}
	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}
static void tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int old_debug;

	printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
	       "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

	/* Perhaps we should reinitialize the hardware here. */

	/*
	 * Stop and restart the interface.
	 * Cheat and increase the debug level temporarily.
	 */
	old_debug = debug;
	debug = 2;
	netdev_close(dev);
	netdev_open(dev);
	debug = old_debug;

	/* Trigger an immediate transmit demand. */

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = np->reap_tx = 0;
	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		np->rx_info[i].skb = skb;
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
							skb->data,
							np->rx_buf_sz,
							DMA_FROM_DEVICE);
		if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
			dev_kfree_skb(skb);
			np->rx_info[i].skb = NULL;
			break;
		}
		/* Grrr, we cannot offset to correctly align the IP header. */
		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
	}
	writew(i - 1, np->base + RxDescQIdx);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Clear the remainder of the Rx buffer ring. */
	for ( ; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = 0;
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

	/* Clear the completion rings. */
	for (i = 0; i < DONE_Q_SIZE; i++) {
		np->rx_done_q[i].status = 0;
		np->tx_done_q[i].status = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
}
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	unsigned int prev_tx;
	u32 status;
	int i, j;

	/*
	 * be cautious here, wrapping the queue has weird semantics
	 * and we may not have enough slots even when it seems we do.
	 */
	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
			return NETDEV_TX_OK;
	}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

	prev_tx = np->cur_tx;
	entry = np->cur_tx % TX_RING_SIZE;
	for (i = 0; i < skb_num_frags(skb); i++) {
		int wrap_ring = 0;
		status = TxDescID;

		if (i == 0) {
			np->tx_info[entry].skb = skb;
			status |= TxCRCEn;
			if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
				status |= TxRingWrap;
				wrap_ring = 1;
			}
			if (np->reap_tx) {
				status |= TxDescIntr;
				np->reap_tx = 0;
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				status |= TxCalTCP;
				dev->stats.tx_compressed++;
			}
			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

			np->tx_info[entry].mapping =
				dma_map_single(&np->pci_dev->dev, skb->data,
					       skb_first_frag_len(skb),
					       DMA_TO_DEVICE);
		} else {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
			status |= skb_frag_size(this_frag);
			np->tx_info[entry].mapping =
				dma_map_single(&np->pci_dev->dev,
					       skb_frag_address(this_frag),
					       skb_frag_size(this_frag),
					       DMA_TO_DEVICE);
		}
		if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
			dev->stats.tx_dropped++;
			goto err_out;
		}

		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
		np->tx_ring[entry].status = cpu_to_le32(status);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
			       dev->name, np->cur_tx, np->dirty_tx,
			       entry, status);
		if (wrap_ring) {
			np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry = 0;
		} else {
			np->tx_info[entry].used_slots = 1;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry++;
		}
		/* scavenge the tx descriptors twice per TX_RING_SIZE */
		if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
			np->reap_tx = 1;
	}

	/* Non-x86: explicitly flush descriptor cache lines here. */
	/* Ensure all descriptors are written back before the transmit is
	   initiated. - Jes */
	wmb();

	/* Update the producer index. */
	writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
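	/*
	 * Note: the scale factor sizeof(starfire_tx_desc) / 8 is 1 for the
	 * 8-byte type 1 descriptor and 2 for the 16-byte type 2, which
	 * suggests the producer index counts in 8-byte units.
	 */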
	/* 4 is arbitrary, but should be ok */
	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

err_out:
	entry = prev_tx % TX_RING_SIZE;
	np->tx_info[entry].skb = NULL;
	if (i > 0) {
		dma_unmap_single(&np->pci_dev->dev,
				 np->tx_info[entry].mapping,
				 skb_first_frag_len(skb), DMA_TO_DEVICE);
		np->tx_info[entry].mapping = 0;
		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
		for (j = 1; j < i; j++) {
			dma_unmap_single(&np->pci_dev->dev,
					 np->tx_info[entry].mapping,
					 skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
					 DMA_TO_DEVICE);
			entry++;
		}
	}
	dev_kfree_skb_any(skb);
	np->cur_tx = prev_tx;
	return NETDEV_TX_OK;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int boguscnt = max_interrupt_work;
	int consumer;
	int tx_status;
	int handled = 0;

	do {
		u32 intr_status = readl(ioaddr + IntrClear);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
			       dev->name, intr_status);

		if (intr_status == 0 || intr_status == (u32) -1)
			break;

		handled = 1;

		if (intr_status & (IntrRxDone | IntrRxEmpty)) {
			u32 enable;

			if (likely(napi_schedule_prep(&np->napi))) {
				__napi_schedule(&np->napi);
				enable = readl(ioaddr + IntrEnable);
				enable &= ~(IntrRxDone | IntrRxEmpty);
				writel(enable, ioaddr + IntrEnable);
				/* flush PCI posting buffers */
				readl(ioaddr + IntrEnable);
			} else {
				/* Paranoia check */
				enable = readl(ioaddr + IntrEnable);
				if (enable & (IntrRxDone | IntrRxEmpty)) {
					printk(KERN_INFO
					       "%s: interrupt while in poll!\n",
					       dev->name);
					enable &= ~(IntrRxDone | IntrRxEmpty);
					writel(enable, ioaddr + IntrEnable);
				}
			}
		}

		/* Scavenge the skbuff list based on the Tx-done queue.
		   There are redundant checks here that may be cleaned up
		   after the driver has proven to be reliable. */
		consumer = readl(ioaddr + TxConsumerIdx);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
			       dev->name, consumer);

		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
			if (debug > 3)
				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
				       dev->name, np->dirty_tx, np->tx_done, tx_status);
			if ((tx_status & 0xe0000000) == 0xa0000000) {
				dev->stats.tx_packets++;
			} else if ((tx_status & 0xe0000000) == 0x80000000) {
				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
				struct sk_buff *skb = np->tx_info[entry].skb;
				np->tx_info[entry].skb = NULL;
				dma_unmap_single(&np->pci_dev->dev,
						 np->tx_info[entry].mapping,
						 skb_first_frag_len(skb),
						 DMA_TO_DEVICE);
				np->tx_info[entry].mapping = 0;
				np->dirty_tx += np->tx_info[entry].used_slots;
				entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
				{
					int i;
					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						dma_unmap_single(&np->pci_dev->dev,
								 np->tx_info[entry].mapping,
								 skb_frag_size(&skb_shinfo(skb)->frags[i]),
								 DMA_TO_DEVICE);
						np->dirty_tx++;
						entry++;
					}
				}

				dev_consume_skb_irq(skb);
			}
			np->tx_done_q[np->tx_done].status = 0;
			np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
		}
		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
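		/*
		 * Note: __netdev_rx() writes the Rx consumer index to the low
		 * half of CompletionQConsumerIdx; the "+ 2" here presumably
		 * targets the Tx consumer index in the upper 16 bits of the
		 * same 32-bit register.
		 */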
		if (netif_queue_stopped(dev) &&
		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
			/* The ring is no longer full, wake the queue. */
			netif_wake_queue(dev);
		}

		/* Stats overflow */
		if (intr_status & IntrStatsMax)
			get_stats(dev);

		/* Media change interrupt. */
		if (intr_status & IntrLinkChange)
			netdev_media_change(dev);

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				       "status=%#8.8x.\n",
				       dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
/*
 * This routine is logically part of the interrupt/poll handler, but separated
 * for clarity and better register allocation.
 */
static int __netdev_rx(struct net_device *dev, int *quota)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 desc_status;
	int retcode = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		struct sk_buff *skb;
		u16 pkt_len;
		int entry;
		rx_done_desc *desc = &np->rx_done_q[np->rx_done];

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
		if (!(desc_status & RxOK)) {
			/* There was an error. */
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
			dev->stats.rx_errors++;
			if (desc_status & RxFIFOErr)
				dev->stats.rx_fifo_errors++;
			goto next_rx;
		}

		if (*quota <= 0) {	/* out of rx quota */
			retcode = 1;
			goto out;
		}
		(*quota)--;

		pkt_len = desc_status;	/* Implicitly Truncate */
		entry = (desc_status >> 16) & 0x7ff;
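		/*
		 * Note: the completion status packs the frame length into its
		 * low 16 bits (the u16 assignment above truncates implicitly)
		 * and the Rx ring entry into bits 16-26, enough for the
		 * chip's maximum 2048-entry ring.
		 */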
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
		/* Check if the packet is long enough to accept without copying
		   to a minimally-sized skbuff. */
		if (pkt_len < rx_copybreak &&
		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
			skb_reserve(skb, 2);	/* 16 byte align the IP header */
			dma_sync_single_for_cpu(&np->pci_dev->dev,
						np->rx_info[entry].mapping,
						pkt_len, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
			dma_sync_single_for_device(&np->pci_dev->dev,
						   np->rx_info[entry].mapping,
						   pkt_len, DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
		} else {
			dma_unmap_single(&np->pci_dev->dev,
					 np->rx_info[entry].mapping,
					 np->rx_buf_sz, DMA_FROM_DEVICE);
			skb = np->rx_info[entry].skb;
			skb_put(skb, pkt_len);
			np->rx_info[entry].skb = NULL;
			np->rx_info[entry].mapping = 0;
		}
#ifndef final_version			/* Remove after testing. */
		/* You will want this info for the initial debug. */
		if (debug > 5)
			printk(KERN_DEBUG "  Rx data %pM %pM %2.2x%2.2x.\n",
			       skb->data, skb->data + 6,
			       skb->data[12], skb->data[13]);
#endif

		skb->protocol = eth_type_trans(skb, dev);
#ifdef VLAN_SUPPORT
		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
#endif
		if (le16_to_cpu(desc->status2) & 0x0100) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev->stats.rx_compressed++;
		}
		/*
		 * This feature doesn't seem to be working, at least
		 * with the two firmware versions I have. If the GFP sees
		 * an IP fragment, it either ignores it completely, or reports
		 * "bad checksum" on it.
		 *
		 * Maybe I missed something -- corrections are welcome.
		 * Until then, the printk stays. :-) -Ion
		 */
		else if (le16_to_cpu(desc->status2) & 0x0040) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = le16_to_cpu(desc->csum);
			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
		}
#ifdef VLAN_SUPPORT
		if (le16_to_cpu(desc->status2) & 0x0200) {
			u16 vlid = le16_to_cpu(desc->vlanid);

			if (debug > 4) {
				printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n",
				       vlid);
			}
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
		}
#endif /* VLAN_SUPPORT */
		netif_receive_skb(skb);
		dev->stats.rx_packets++;

	next_rx:
		np->cur_rx++;
		desc->status = 0;
		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
	}

	if (*quota == 0) {	/* out of rx quota */
		retcode = 1;
		goto out;
	}
	writew(np->rx_done, np->base + CompletionQConsumerIdx);

 out:
	refill_rx_ring(dev);
	if (debug > 4)
		printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
		       retcode, np->rx_done, desc_status);
	return retcode;
}
static int netdev_poll(struct napi_struct *napi, int budget)
{
	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
	struct net_device *dev = np->dev;
	u32 intr_status;
	void __iomem *ioaddr = np->base;
	int quota = budget;

	do {
		writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);

		if (__netdev_rx(dev, &quota))
			goto out;

		intr_status = readl(ioaddr + IntrStatus);
	} while (intr_status & (IntrRxDone | IntrRxEmpty));

	napi_complete(napi);
	intr_status = readl(ioaddr + IntrEnable);
	intr_status |= IntrRxDone | IntrRxEmpty;
	writel(intr_status, ioaddr + IntrEnable);

 out:
	if (debug > 1)
		printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n",
		       budget - quota);

	/* Restart Rx engine if stopped. */
	return budget - quota;
}
static void refill_rx_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int entry = -1;

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_info[entry].skb == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			np->rx_info[entry].mapping =
				dma_map_single(&np->pci_dev->dev, skb->data,
					       np->rx_buf_sz, DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
				dev_kfree_skb(skb);
				np->rx_info[entry].skb = NULL;
				break;
			}
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		}
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
	}
	if (entry >= 0)
		writew(entry, np->base + RxDescQIdx);
}
static void netdev_media_change(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 reg0, reg1, reg4, reg5;
	u32 new_tx_mode;
	u32 new_intr_timer_ctrl;

	/* reset status first */
	mdio_read(dev, np->phys[0], MII_BMCR);
	mdio_read(dev, np->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

	if (reg1 & BMSR_LSTATUS) {
		/* link is up */
		if (reg0 & BMCR_ANENABLE) {
			/* autonegotiation is enabled */
			reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
			reg5 = mdio_read(dev, np->phys[0], MII_LPA);
			if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 1;
			} else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
				np->speed100 = 1;
				np->mii_if.full_duplex = 0;
			} else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
				np->speed100 = 0;
				np->mii_if.full_duplex = 1;
			} else {
				np->speed100 = 0;
				np->mii_if.full_duplex = 0;
			}
		} else {
			/* autonegotiation is disabled */
			if (reg0 & BMCR_SPEED100)
				np->speed100 = 1;
			else
				np->speed100 = 0;
			if (reg0 & BMCR_FULLDPLX)
				np->mii_if.full_duplex = 1;
			else
				np->mii_if.full_duplex = 0;
		}
		netif_carrier_on(dev);
		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");

		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
		if (np->mii_if.full_duplex)
			new_tx_mode |= FullDuplex;
		if (np->tx_mode != new_tx_mode) {
			np->tx_mode = new_tx_mode;
			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
			udelay(1000);
			writel(np->tx_mode, ioaddr + TxMode);
		}

		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
		if (np->speed100)
			new_intr_timer_ctrl |= Timer10X;
		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
			np->intr_timer_ctrl = new_intr_timer_ctrl;
			writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
		}
	} else {
		netif_carrier_off(dev);
		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
	}
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);

	/* Came close to underrunning the Tx FIFO, increase threshold. */
	if (intr_status & IntrTxDataLow) {
		if (np->tx_threshold <= PKT_BUF_SZ / 16) {
			writel(++np->tx_threshold, np->base + TxThreshold);
			printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
			       dev->name, np->tx_threshold * 16);
		} else
			printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
	}
	if (intr_status & IntrRxGFPDead) {
		dev->stats.rx_fifo_errors++;
		dev->stats.rx_errors++;
	}
	if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
		dev->stats.tx_fifo_errors++;
		dev->stats.tx_errors++;
	}
	if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
		printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
		       dev->name, intr_status);
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	/* This adapter architecture needs no SMP locks. */
	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
	dev->stats.tx_packets = readl(ioaddr + 0x57000);
	dev->stats.tx_aborted_errors =
		readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
	dev->stats.collisions =
		readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);

	/* The chip only reports silently dropped frames here. */
	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
	writew(0, ioaddr + RxDMAStatus);
	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &dev->stats;
}
#ifdef VLAN_SUPPORT
static u32 set_vlan_mode(struct netdev_private *np)
{
	u32 ret = VlanMode;
	u16 vid;
	void __iomem *filter_addr = np->base + HashTable + 8;
	int vlan_count = 0;

	for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
		if (vlan_count == 32)
			break;
		writew(vid, filter_addr);
		filter_addr += 16;
		vlan_count++;
	}
	if (vlan_count == 32) {
		ret |= PerfectFilterVlan;
		while (vlan_count < 32) {
			writew(0, filter_addr);
			filter_addr += 16;
			vlan_count++;
		}
	}
	return ret;
}
#endif /* VLAN_SUPPORT */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u32 rx_mode = MinVLANPrio;
	struct netdev_hw_addr *ha;
	int i;

#ifdef VLAN_SUPPORT
	rx_mode |= set_vlan_mode(np);
#endif /* VLAN_SUPPORT */

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		rx_mode |= AcceptAll;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
	} else if (netdev_mc_count(dev) <= 14) {
		/* Use the 16 element perfect filter, skip first two entries. */
		void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		__be16 *eaddrs;
		netdev_for_each_mc_addr(ha, dev) {
			eaddrs = (__be16 *) ha->addr;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
		}
		eaddrs = (__be16 *)dev->dev_addr;
		i = netdev_mc_count(dev) + 2;
		while (i++ < 16) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		rx_mode |= AcceptBroadcast|PerfectFilter;
	} else {
		/* Must use a multicast hash table. */
		void __iomem *filter_addr;
		__be16 *eaddrs;
		__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The chip uses the upper 9 CRC bits
			   as index into the hash table */
			int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
			__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];

			*fptr |= cpu_to_le32(1 << (bit_nr & 31));
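			/*
			 * Note: >> 23 keeps the top 9 CRC bits, an index of
			 * 0-511 into the 512-entry table; (bit_nr >> 4) & ~1
			 * selects the aligned 32-bit word holding the 16-bit
			 * filter slot for that index.
			 */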
		}
		/* Clear the perfect filter list, skip first two entries. */
		filter_addr = ioaddr + PerfFilterTable + 2 * 16;
		eaddrs = (__be16 *)dev->dev_addr;
		for (i = 2; i < 16; i++) {
			writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
			writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
		}
		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
			writew(mc_filter[i], filter_addr);
		rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
	}
	writel(rx_mode, ioaddr + RxFilterMode);
}
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return 0;
}
static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	check_duplex(dev);
	return res;
}
static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	return debug;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	debug = val;
}
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
		check_duplex(dev);

	return rc;
}
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	netif_stop_queue(dev);

	napi_disable(&np->napi);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + GenCtrl);
	readl(ioaddr + GenCtrl);

	if (debug > 1) {
		printk(KERN_DEBUG "  Tx ring at %#llx:\n",
		       (long long) np->tx_ring_dma);
		for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
			printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
			       i, le32_to_cpu(np->tx_ring[i].status),
			       (long long) dma_to_cpu(np->tx_ring[i].addr),
			       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
				printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
				       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
			}
	}

	free_irq(np->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
		if (np->rx_info[i].skb != NULL) {
			dma_unmap_single(&np->pci_dev->dev,
					 np->rx_info[i].mapping,
					 np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(np->rx_info[i].skb);
		}
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_info[i].skb;
		if (skb == NULL)
			continue;
		dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
				 skb_first_frag_len(skb), DMA_TO_DEVICE);
		np->tx_info[i].mapping = 0;
		dev_kfree_skb(skb);
		np->tx_info[i].skb = NULL;
	}

	return 0;
}
static int __maybe_unused starfire_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		netdev_close(dev);
	}

	return 0;
}

static int __maybe_unused starfire_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	if (netif_running(dev)) {
		netdev_open(dev);
		netif_device_attach(dev);
	}

	return 0;
}
static void starfire_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);

	BUG_ON(!dev);

	unregister_netdev(dev);

	if (np->queue_mem)
		dma_free_coherent(&pdev->dev, np->queue_mem_size,
				  np->queue_mem, np->queue_mem_dma);

	/* XXX: add wakeup code -- requires firmware for MagicPacket */
	pci_set_power_state(pdev, PCI_D3hot);	/* go to sleep in D3 mode */
	pci_disable_device(pdev);

	iounmap(np->base);
	pci_release_regions(pdev);

	free_netdev(dev);			/* Will also free np!! */
}
static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);

static struct pci_driver starfire_driver = {
	.name		= DRV_NAME,
	.probe		= starfire_init_one,
	.remove		= starfire_remove_one,
	.driver.pm	= &starfire_pm_ops,
	.id_table	= starfire_pci_tbl,
};
static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif

	BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));

	return pci_register_driver(&starfire_driver);
}

static void __exit starfire_cleanup (void)
{
	pci_unregister_driver (&starfire_driver);
}

module_init(starfire_init);
module_exit(starfire_cleanup);