1 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
3 Written 1998-2000 by Donald Becker.
5 Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
6 send all bug reports to me, and not to Donald Becker, as this code
7 has been heavily modified from Donald's original version.
9 This software may be used and distributed according to the terms of
10 the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on or derived from this code fall under the GPL and must
12 retain the authorship, copyright and license notice. This file is not
13 a complete program and may only be used when the entire operating
14 system is licensed under the GPL.
16 The information below comes from Donald Becker's original driver:
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
23 Support and updates available at
24 http://www.scyld.com/network/starfire.html
25 [link no longer provides useful info -jgarzik]
29 #define DRV_NAME "starfire"
31 #include <linux/interrupt.h>
32 #include <linux/module.h>
33 #include <linux/kernel.h>
34 #include <linux/pci.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/init.h>
38 #include <linux/delay.h>
39 #include <linux/crc32.h>
40 #include <linux/ethtool.h>
41 #include <linux/mii.h>
42 #include <linux/if_vlan.h>
44 #include <linux/firmware.h>
45 #include <asm/processor.h> /* Processor type for cache alignment. */
46 #include <linux/uaccess.h>
50 * The current frame processor firmware fails to checksum a fragment
51 * of length 1. If and when this is fixed, the #define below can be removed.
53 #define HAS_BROKEN_FIRMWARE
56 * If using the broken firmware, data must be padded to the next 32-bit boundary.
58 #ifdef HAS_BROKEN_FIRMWARE
59 #define PADDING_MASK 3
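/* Example: a 61-byte frame becomes (61 + PADDING_MASK) & ~PADDING_MASK = 64
   bytes before being handed to the Tx DMA engine (see start_tx()). */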
63 * Define this if using the driver with the zero-copy patch
67 #if IS_ENABLED(CONFIG_VLAN_8021Q)
71 /* The user-configurable values.
72 These may be modified when a driver module is loaded. */
74 /* Used for tuning interrupt latency vs. overhead. */
75 static int intr_latency;
76 static int small_frames;
78 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
79 static int max_interrupt_work = 20;
81 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
82 The Starfire has a 512 element hash table based on the Ethernet CRC. */
83 static const int multicast_filter_limit = 512;
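/* Each multicast address is hashed to one of the 512 filter bits by taking
   the top 9 bits of the little-endian CRC-32 of the address; see set_rx_mode(). */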
84 /* Whether to do TCP/UDP checksums in hardware */
85 static int enable_hw_cksum = 1;
87 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
89 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
90 * Setting to > 1518 effectively disables this feature.
93 * The ia64 doesn't allow unaligned loads, even of integers that are
94 * misaligned only on a 2-byte boundary. Thus we always force copying of
95 * packets, as the Starfire doesn't allow misaligned DMAs ;-(
98 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
99 * at least, having unaligned frames leads to a rather serious performance penalty.
102 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
103 static int rx_copybreak = PKT_BUF_SZ;
105 static int rx_copybreak /* = 0 */;
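/* Example: rx_copybreak=256 copies any frame shorter than 256 bytes into a
   freshly allocated skbuff and reuses the original full-size ring buffer;
   longer frames are passed up the stack in place. */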
108 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
110 #define DMA_BURST_SIZE 64
112 #define DMA_BURST_SIZE 128
115 /* Operational parameters that are set at compile time. */
117 /* The "native" ring sizes are either 256 or 2048.
118 However in some modes a descriptor may be marked to wrap the ring earlier.
120 #define RX_RING_SIZE 256
121 #define TX_RING_SIZE 32
122 /* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB depending on the entry size. */
123 #define DONE_Q_SIZE 1024
124 /* All queues must be aligned on a 256-byte boundary */
125 #define QUEUE_ALIGN 256
127 #if RX_RING_SIZE > 256
128 #define RX_Q_ENTRIES Rx2048QEntries
130 #define RX_Q_ENTRIES Rx256QEntries
133 /* Operational parameters that usually are not changed. */
134 /* Time in jiffies before concluding the transmitter is hung. */
135 #define TX_TIMEOUT (2 * HZ)
137 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
138 /* 64-bit dma_addr_t */
139 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
140 #define netdrv_addr_t __le64
141 #define cpu_to_dma(x) cpu_to_le64(x)
142 #define dma_to_cpu(x) le64_to_cpu(x)
143 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
144 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
145 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
146 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
147 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
148 #else /* 32-bit dma_addr_t */
149 #define netdrv_addr_t __le32
150 #define cpu_to_dma(x) cpu_to_le32(x)
151 #define dma_to_cpu(x) le32_to_cpu(x)
152 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
153 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
154 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
155 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
156 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
159 #define skb_first_frag_len(skb) skb_headlen(skb)
160 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
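/* The linear header area counts as one fragment, so a fully linear skb
   yields skb_num_frags() == 1. */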
163 #define FIRMWARE_RX "/*(DEBLOBBED)*/"
164 #define FIRMWARE_TX "/*(DEBLOBBED)*/"
166 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
167 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
168 MODULE_LICENSE("GPL");
171 module_param(max_interrupt_work, int, 0);
172 module_param(mtu, int, 0);
173 module_param(debug, int, 0);
174 module_param(rx_copybreak, int, 0);
175 module_param(intr_latency, int, 0);
176 module_param(small_frames, int, 0);
177 module_param(enable_hw_cksum, int, 0);
178 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
179 MODULE_PARM_DESC(mtu, "MTU (all boards)");
180 MODULE_PARM_DESC(debug, "Debug level (0-7)");
181 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
182 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
183 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
184 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
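/* Example invocation (illustrative values only):
   modprobe starfire intr_latency=200 small_frames=256 enable_hw_cksum=1 */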
189 I. Board Compatibility
191 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
193 II. Board-specific settings
195 III. Driver operation
199 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
200 ring sizes are fixed by the hardware, but a ring may optionally be wrapped
201 earlier by the END bit in a descriptor.
202 This driver uses that hardware queue size for the Rx ring, where a large
203 number of entries has no ill effect beyond increasing the potential backlog.
204 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
205 disables the queue layer priority ordering and we have no mechanism to
206 utilize the hardware two-level priority queue. When modifying the
207 RX/TX_RING_SIZE, pay close attention to page sizes and the ring-empty warning in the interrupt handler.
210 IIIb/c. Transmit/Receive Structure
212 See the Adaptec manual for the many possible structures, and options for
213 each structure. There are far too many to document all of them here.
215 For transmit this driver uses type 0/1 transmit descriptors (depending
216 on the 32/64 bitness of the architecture), and relies on automatic
217 minimum-length padding. It does not use the completion queue
218 consumer index, but instead checks for non-zero status entries.
220 For receive this driver uses type 2/3 receive descriptors. The driver
221 allocates full frame size skbuffs for the Rx ring buffers, so all frames
222 should fit in a single descriptor. The driver does not use the completion
223 queue consumer index, but instead checks for non-zero status entries.
225 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
226 is allocated and the frame is copied to the new skbuff. When the incoming
227 frame is larger, the skbuff is passed directly up the protocol stack.
228 Buffers consumed this way are replaced by newly allocated skbuffs in a later phase of receives.
231 A notable aspect of operation is that unaligned buffers are not permitted by
232 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
233 isn't longword aligned, which may cause problems on some machines,
234 e.g. Alpha and IA-64. For these architectures, the driver is forced to copy
235 the frame into a new skbuff unconditionally. Copied frames are put into the
236 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
238 IIId. Synchronization
240 The driver runs as two independent, single-threaded flows of control. One
241 is the send-packet routine, which enforces single-threaded use by the
242 netif queue-stopped state (historically the dev->tbusy flag). The other thread is the interrupt handler, which is single
243 threaded by the hardware and interrupt handling software.
245 The send packet thread has partial control over the Tx ring and the netif_queue
246 status. If the number of free Tx slots in the ring falls below a certain number
247 (currently hardcoded to 4), it signals the upper layer to stop the queue.
249 The interrupt handler has exclusive control over the Rx ring and records stats
250 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
251 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
252 number of free Tx slots is above the threshold, it signals the upper layer to restart the queue.
259 The Adaptec Starfire manuals, available only from Adaptec.
260 http://www.scyld.com/expert/100mbps.html
261 http://www.scyld.com/expert/NWay.html
265 - StopOnPerr is broken, don't enable
266 - Hardware ethernet padding exposes random data, perform software padding
267 instead (unverified -- works correctly for all the hardware I have)
273 enum chip_capability_flags {CanHaveMII=1, };
279 static const struct pci_device_id starfire_pci_tbl[] = {
280 { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
283 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
285 /* A chip capabilities table, matching the CH_xxx entries in starfire_pci_tbl[] above. */
286 static const struct chip_info {
290 { "Adaptec Starfire 6915", CanHaveMII },
294 /* Offsets to the device registers.
295 Unlike software-only systems, device drivers interact with complex hardware.
296 It's not useful to define symbolic names for every register bit in the
297 device. The name can only partially document the semantics and make
298 the driver longer and more difficult to read.
299 In general, only the important configuration values or bits changed
300 multiple times should be defined symbolically.
302 enum register_offsets {
303 PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
304 IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
305 MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
306 GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
307 TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
308 TxRingHiAddr=0x5009C, /* 64 bit address extension. */
309 TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
311 CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
312 RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
313 CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
314 RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
315 RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
316 TxMode=0x55000, VlanType=0x55064,
317 PerfFilterTable=0x56000, HashTable=0x56100,
318 TxGfpMem=0x58000, RxGfpMem=0x5a000,
322 * Bits in the interrupt status/mask registers.
323 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
324 * enables all the interrupt sources that are or'ed into those status bits.
326 enum intr_status_bits {
327 IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
328 IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
329 IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
330 IntrTxComplQLow=0x200000, IntrPCI=0x100000,
331 IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
332 IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
333 IntrNormalSummary=0x8000, IntrTxDone=0x4000,
334 IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
335 IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
336 IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
337 IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
338 IntrNoTxCsum=0x20, IntrTxBadID=0x10,
339 IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
340 IntrTxGfp=0x02, IntrPCIPad=0x01,
342 IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
343 IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
344 IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
347 /* Bits in the RxFilterMode register. */
349 AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
350 AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
351 PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
355 /* Bits in the TxMode register */
357 MiiSoftReset=0x8000, MIILoopback=0x4000,
358 TxFlowEnable=0x0800, RxFlowEnable=0x0400,
359 PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
362 /* Bits in the TxDescCtrl register. */
364 TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
365 TxDescSpace128=0x30, TxDescSpace256=0x40,
366 TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
367 TxDescType3=0x03, TxDescType4=0x04,
368 TxNoDMACompletion=0x08,
369 TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
370 TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
371 TxDMABurstSizeShift=8,
374 /* Bits in the RxDescQCtrl register. */
376 RxBufferLenShift=16, RxMinDescrThreshShift=0,
377 RxPrefetchMode=0x8000, RxVariableQ=0x2000,
378 Rx2048QEntries=0x4000, Rx256QEntries=0,
379 RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
380 RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
381 RxDescSpace4=0x000, RxDescSpace8=0x100,
382 RxDescSpace16=0x200, RxDescSpace32=0x300,
383 RxDescSpace64=0x400, RxDescSpace128=0x500,
387 /* Bits in the RxDMACtrl register. */
388 enum rx_dmactrl_bits {
389 RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
390 RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
391 RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
392 RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
393 RxChecksumRejectTCPOnly=0x01000000,
394 RxCompletionQ2Enable=0x800000,
395 RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
396 RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
397 RxDMAQ2NonIP=0x400000,
398 RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
399 RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
403 /* Bits in the RxCompletionAddr register */
405 RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
406 RxComplProducerWrEn=0x40,
407 RxComplType0=0x00, RxComplType1=0x10,
408 RxComplType2=0x20, RxComplType3=0x30,
409 RxComplThreshShift=0,
412 /* Bits in the TxCompletionAddr register */
414 TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
415 TxComplProducerWrEn=0x40,
416 TxComplIntrStatus=0x20,
417 CommonQueueMode=0x10,
418 TxComplThreshShift=0,
421 /* Bits in the GenCtrl register */
423 RxEnable=0x05, TxEnable=0x0a,
424 RxGFPEnable=0x10, TxGFPEnable=0x20,
427 /* Bits in the IntrTimerCtrl register */
428 enum intr_ctrl_bits {
429 Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
430 SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
431 IntrLatencyMask=0x1f,
434 /* The Rx and Tx buffer descriptors. */
435 struct starfire_rx_desc {
436 netdrv_addr_t rxaddr;
439 RxDescValid=1, RxDescEndRing=2,
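/* These flags are OR'ed into the low bits of rxaddr; Rx buffers are at least
   4-byte aligned, so those address bits are otherwise unused (see init_ring()). */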
442 /* Completion queue entry. */
443 struct short_rx_done_desc {
444 __le32 status; /* Low 16 bits is length. */
446 struct basic_rx_done_desc {
447 __le32 status; /* Low 16 bits is length. */
451 struct csum_rx_done_desc {
452 __le32 status; /* Low 16 bits is length. */
453 __le16 csum; /* Partial checksum */
456 struct full_rx_done_desc {
457 __le32 status; /* Low 16 bits is length. */
461 __le16 csum; /* partial checksum */
464 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
466 typedef struct full_rx_done_desc rx_done_desc;
467 #define RxComplType RxComplType3
468 #else /* not VLAN_SUPPORT */
469 typedef struct csum_rx_done_desc rx_done_desc;
470 #define RxComplType RxComplType2
471 #endif /* not VLAN_SUPPORT */
474 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
477 /* Type 1 Tx descriptor. */
478 struct starfire_tx_desc_1 {
479 __le32 status; /* Upper bits are status, lower 16 length. */
483 /* Type 2 Tx descriptor. */
484 struct starfire_tx_desc_2 {
485 __le32 status; /* Upper bits are status, lower 16 length. */
491 typedef struct starfire_tx_desc_2 starfire_tx_desc;
492 #define TX_DESC_TYPE TxDescType2
493 #else /* not ADDR_64BITS */
494 typedef struct starfire_tx_desc_1 starfire_tx_desc;
495 #define TX_DESC_TYPE TxDescType1
496 #endif /* not ADDR_64BITS */
497 #define TX_DESC_SPACING TxDescSpaceUnlim
501 TxCRCEn=0x01000000, TxDescIntr=0x08000000,
502 TxRingWrap=0x04000000, TxCalTCP=0x02000000,
504 struct tx_done_desc {
505 __le32 status; /* timestamp, index. */
507 __le32 intrstatus; /* interrupt status */
511 struct rx_ring_info {
515 struct tx_ring_info {
518 unsigned int used_slots;
522 struct netdev_private {
523 /* Descriptor rings first for alignment. */
524 struct starfire_rx_desc *rx_ring;
525 starfire_tx_desc *tx_ring;
526 dma_addr_t rx_ring_dma;
527 dma_addr_t tx_ring_dma;
528 /* The addresses of rx/tx-in-place skbuffs. */
529 struct rx_ring_info rx_info[RX_RING_SIZE];
530 struct tx_ring_info tx_info[TX_RING_SIZE];
531 /* Pointers to completion queues (full pages). */
532 rx_done_desc *rx_done_q;
533 dma_addr_t rx_done_q_dma;
534 unsigned int rx_done;
535 struct tx_done_desc *tx_done_q;
536 dma_addr_t tx_done_q_dma;
537 unsigned int tx_done;
538 struct napi_struct napi;
539 struct net_device *dev;
540 struct pci_dev *pci_dev;
542 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
545 dma_addr_t queue_mem_dma;
546 size_t queue_mem_size;
548 /* Frequently used values: keep some adjacent for cache effect. */
550 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
551 unsigned int cur_tx, dirty_tx, reap_tx;
552 unsigned int rx_buf_sz; /* Based on MTU+slack. */
553 /* These values keep track of the transceiver/media in use. */
554 int speed100; /* Set if speed == 100MBit. */
558 /* MII transceiver section. */
559 struct mii_if_info mii_if; /* MII lib hooks/info */
560 int phy_cnt; /* Number of MII PHYs found. */
561 unsigned char phys[PHY_CNT]; /* MII device addresses. */
566 static int mdio_read(struct net_device *dev, int phy_id, int location);
567 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
568 static int netdev_open(struct net_device *dev);
569 static void check_duplex(struct net_device *dev);
570 static void tx_timeout(struct net_device *dev, unsigned int txqueue);
571 static void init_ring(struct net_device *dev);
572 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
573 static irqreturn_t intr_handler(int irq, void *dev_instance);
574 static void netdev_error(struct net_device *dev, int intr_status);
575 static int __netdev_rx(struct net_device *dev, int *quota);
576 static int netdev_poll(struct napi_struct *napi, int budget);
577 static void refill_rx_ring(struct net_device *dev);
579 static void set_rx_mode(struct net_device *dev);
580 static struct net_device_stats *get_stats(struct net_device *dev);
581 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
582 static int netdev_close(struct net_device *dev);
583 static void netdev_media_change(struct net_device *dev);
584 static const struct ethtool_ops ethtool_ops;
588 static int netdev_vlan_rx_add_vid(struct net_device *dev,
589 __be16 proto, u16 vid)
591 struct netdev_private *np = netdev_priv(dev);
593 spin_lock(&np->lock);
595 printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
596 set_bit(vid, np->active_vlans);
598 spin_unlock(&np->lock);
603 static int netdev_vlan_rx_kill_vid(struct net_device *dev,
604 __be16 proto, u16 vid)
606 struct netdev_private *np = netdev_priv(dev);
608 spin_lock(&np->lock);
610 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
611 clear_bit(vid, np->active_vlans);
613 spin_unlock(&np->lock);
617 #endif /* VLAN_SUPPORT */
620 static const struct net_device_ops netdev_ops = {
621 .ndo_open = netdev_open,
622 .ndo_stop = netdev_close,
623 .ndo_start_xmit = start_tx,
624 .ndo_tx_timeout = tx_timeout,
625 .ndo_get_stats = get_stats,
626 .ndo_set_rx_mode = set_rx_mode,
627 .ndo_eth_ioctl = netdev_ioctl,
628 .ndo_set_mac_address = eth_mac_addr,
629 .ndo_validate_addr = eth_validate_addr,
631 .ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid,
632 .ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid,
636 static int starfire_init_one(struct pci_dev *pdev,
637 const struct pci_device_id *ent)
639 struct device *d = &pdev->dev;
640 struct netdev_private *np;
641 int i, irq, chip_idx = ent->driver_data;
642 struct net_device *dev;
646 int drv_flags, io_size;
649 if (pci_enable_device (pdev))
652 ioaddr = pci_resource_start(pdev, 0);
653 io_size = pci_resource_len(pdev, 0);
654 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
655 dev_err(d, "no PCI MEM resources, aborting\n");
659 dev = alloc_etherdev(sizeof(*np));
663 SET_NETDEV_DEV(dev, &pdev->dev);
667 if (pci_request_regions (pdev, DRV_NAME)) {
668 dev_err(d, "cannot reserve PCI resources, aborting\n");
669 goto err_out_free_netdev;
672 base = ioremap(ioaddr, io_size);
674 dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
676 goto err_out_free_res;
679 pci_set_master(pdev);
681 /* enable MWI -- it vastly improves Rx performance on sparc64 */
682 pci_try_set_mwi(pdev);
685 /* Starfire can do TCP/UDP checksumming */
687 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
688 #endif /* ZEROCOPY */
691 dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
692 #endif /* VLAN_SUPPORT */
694 dev->features |= NETIF_F_HIGHDMA;
695 #endif /* ADDR_64BITS */
697 /* Serial EEPROM reads are hidden by the hardware. */
698 for (i = 0; i < 6; i++)
699 addr[i] = readb(base + EEPROMCtrl + 20 - i);
700 eth_hw_addr_set(dev, addr);
702 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
704 for (i = 0; i < 0x20; i++)
706 (unsigned int)readb(base + EEPROMCtrl + i),
707 i % 16 != 15 ? " " : "\n");
710 /* Issue soft reset */
711 writel(MiiSoftReset, base + TxMode);
713 writel(0, base + TxMode);
715 /* Reset the chip to erase previous misconfiguration. */
716 writel(1, base + PCIDeviceConfig);
718 while (--boguscnt > 0) {
720 if ((readl(base + PCIDeviceConfig) & 1) == 0)
724 printk("%s: chipset reset never completed!\n", dev->name);
725 /* wait a little longer */
728 np = netdev_priv(dev);
731 spin_lock_init(&np->lock);
732 pci_set_drvdata(pdev, dev);
736 np->mii_if.dev = dev;
737 np->mii_if.mdio_read = mdio_read;
738 np->mii_if.mdio_write = mdio_write;
739 np->mii_if.phy_id_mask = 0x1f;
740 np->mii_if.reg_num_mask = 0x1f;
742 drv_flags = netdrv_tbl[chip_idx].drv_flags;
746 /* timer resolution is 128 * 0.8us */
747 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
748 Timer10X | EnableIntrMasking;
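/* Example: intr_latency=1000 (us) yields 1000 * 10 / 1024 = 9 ticks of
   102.4us each, i.e. about 922us of actual latency. */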
750 if (small_frames > 0) {
751 np->intr_timer_ctrl |= SmallFrameBypass;
752 switch (small_frames) {
754 np->intr_timer_ctrl |= SmallFrame64;
757 np->intr_timer_ctrl |= SmallFrame128;
760 np->intr_timer_ctrl |= SmallFrame256;
763 np->intr_timer_ctrl |= SmallFrame512;
764 if (small_frames > 512)
765 printk("Adjusting small_frames down to 512\n");
770 dev->netdev_ops = &netdev_ops;
771 dev->watchdog_timeo = TX_TIMEOUT;
772 dev->ethtool_ops = ðtool_ops;
774 netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work);
779 if (register_netdev(dev))
780 goto err_out_cleardev;
782 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
783 dev->name, netdrv_tbl[chip_idx].name, base,
786 if (drv_flags & CanHaveMII) {
787 int phy, phy_idx = 0;
789 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
790 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
793 while (--boguscnt > 0)
794 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
797 printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
800 mii_status = mdio_read(dev, phy, MII_BMSR);
801 if (mii_status != 0) {
802 np->phys[phy_idx++] = phy;
803 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
804 printk(KERN_INFO "%s: MII PHY found at address %d, status "
805 "%#4.4x advertising %#4.4x.\n",
806 dev->name, phy, mii_status, np->mii_if.advertising);
807 /* there can be only one PHY on-board */
811 np->phy_cnt = phy_idx;
813 np->mii_if.phy_id = np->phys[0];
815 memset(&np->mii_if, 0, sizeof(np->mii_if));
818 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
819 dev->name, enable_hw_cksum ? "enabled" : "disabled");
825 pci_release_regions (pdev);
832 /* Read the MII Management Data I/O (MDIO) interfaces. */
833 static int mdio_read(struct net_device *dev, int phy_id, int location)
835 struct netdev_private *np = netdev_priv(dev);
836 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
837 int result, boguscnt=1000;
838 /* ??? Should we add a busy-wait here? */
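/* The loop below polls until the top two status bits read binary 10,
   i.e. the MII read cycle has completed and the low 16 bits are valid. */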
840 result = readl(mdio_addr);
841 } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
844 if ((result & 0xffff) == 0xffff)
846 return result & 0xffff;
850 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
852 struct netdev_private *np = netdev_priv(dev);
853 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
854 writel(value, mdio_addr);
855 /* The busy-wait will occur before a read. */
859 static int netdev_open(struct net_device *dev)
861 const struct firmware *fw_rx, *fw_tx;
862 const __be32 *fw_rx_data, *fw_tx_data;
863 struct netdev_private *np = netdev_priv(dev);
864 void __iomem *ioaddr = np->base;
865 const int irq = np->pci_dev->irq;
867 size_t tx_size, rx_size;
868 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
870 /* Do we ever need to reset the chip??? */
872 retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
876 /* Disable the Rx and Tx, and reset the chip. */
877 writel(0, ioaddr + GenCtrl);
878 writel(1, ioaddr + PCIDeviceConfig);
880 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
883 /* Allocate the various queues. */
884 if (!np->queue_mem) {
885 tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
886 rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
887 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
888 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
889 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
890 np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
892 &np->queue_mem_dma, GFP_ATOMIC);
893 if (np->queue_mem == NULL) {
898 np->tx_done_q = np->queue_mem;
899 np->tx_done_q_dma = np->queue_mem_dma;
900 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
901 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
902 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
903 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
904 np->rx_ring = (void *) np->tx_ring + tx_ring_size;
905 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
908 /* Start with no carrier, it gets adjusted later */
909 netif_carrier_off(dev);
911 /* Set the size of the Rx buffers. */
912 writel((np->rx_buf_sz << RxBufferLenShift) |
913 (0 << RxMinDescrThreshShift) |
914 RxPrefetchMode | RxVariableQ |
916 RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
918 ioaddr + RxDescQCtrl);
920 /* Set up the Rx DMA controller. */
921 writel(RxChecksumIgnore |
922 (0 << RxEarlyIntThreshShift) |
923 (6 << RxHighPrioThreshShift) |
924 ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
927 /* Set up the Tx descriptor control register. */
928 writel((2 << TxHiPriFIFOThreshShift) |
929 (0 << TxPadLenShift) |
930 ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
931 TX_DESC_Q_ADDR_SIZE |
932 TX_DESC_SPACING | TX_DESC_TYPE,
933 ioaddr + TxDescCtrl);
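/* Write the high 32 bits of the queue base addresses; the double 16-bit
   shift avoids an undefined 32-bit shift when dma_addr_t is 32 bits wide. */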
935 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
936 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
937 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
938 writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
939 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
941 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
942 writel(np->rx_done_q_dma |
944 (0 << RxComplThreshShift),
945 ioaddr + RxCompletionAddr);
948 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
950 /* Fill both the Tx SA register and the Rx perfect filter. */
951 for (i = 0; i < 6; i++)
952 writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
953 /* The first entry is special because it bypasses the VLAN filter; clear it so that it never matches. */
955 writew(0, ioaddr + PerfFilterTable);
956 writew(0, ioaddr + PerfFilterTable + 4);
957 writew(0, ioaddr + PerfFilterTable + 8);
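/* Fill entries 1-15 with the station address. Each perfect-filter entry
   occupies 16 bytes; the MAC's three 16-bit words are written in reverse
   order (low-order word first), one per 32-bit register slot. */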
958 for (i = 1; i < 16; i++) {
959 const __be16 *eaddrs = (const __be16 *)dev->dev_addr;
960 void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
961 writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
962 writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
963 writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
966 /* Initialize other registers. */
967 /* Configure the PCI bus bursts and FIFO thresholds. */
968 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
969 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
971 writel(np->tx_mode, ioaddr + TxMode);
972 np->tx_threshold = 4;
973 writel(np->tx_threshold, ioaddr + TxThreshold);
975 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
977 napi_enable(&np->napi);
979 netif_start_queue(dev);
982 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
985 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
988 /* Enable GPIO interrupts on link change */
989 writel(0x0f00ff00, ioaddr + GPIOCtrl);
991 /* Set the interrupt mask */
992 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
993 IntrTxDMADone | IntrStatsMax | IntrLinkChange |
994 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
995 ioaddr + IntrEnable);
996 /* Enable PCI interrupts. */
997 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
998 ioaddr + PCIDeviceConfig);
1001 /* Set VLAN type to 802.1q */
1002 writel(ETH_P_8021Q, ioaddr + VlanType);
1003 #endif /* VLAN_SUPPORT */
1005 retval = reject_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
1007 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1011 if (fw_rx->size % 4) {
1012 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1013 fw_rx->size, FIRMWARE_RX);
1017 retval = reject_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
1019 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1023 if (fw_tx->size % 4) {
1024 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1025 fw_tx->size, FIRMWARE_TX);
1029 fw_rx_data = (const __be32 *)&fw_rx->data[0];
1030 fw_tx_data = (const __be32 *)&fw_tx->data[0];
1031 rx_size = fw_rx->size / 4;
1032 tx_size = fw_tx->size / 4;
1034 /* Load Rx/Tx firmware into the frame processors */
1035 for (i = 0; i < rx_size; i++)
1036 writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1037 for (i = 0; i < tx_size; i++)
1038 writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1039 if (enable_hw_cksum)
1040 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1041 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1043 /* Enable the Rx and Tx units only. */
1044 writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1047 printk(KERN_DEBUG "%s: Done netdev_open().\n",
1051 release_firmware(fw_tx);
1053 release_firmware(fw_rx);
1061 static void check_duplex(struct net_device *dev)
1063 struct netdev_private *np = netdev_priv(dev);
1065 int silly_count = 1000;
1067 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1068 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1070 while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1073 printk("%s: MII reset failed!\n", dev->name);
1077 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1079 if (!np->mii_if.force_media) {
1080 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1082 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1084 reg0 |= BMCR_SPEED100;
1085 if (np->mii_if.full_duplex)
1086 reg0 |= BMCR_FULLDPLX;
1087 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1089 np->speed100 ? "100" : "10",
1090 np->mii_if.full_duplex ? "full" : "half");
1092 mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1096 static void tx_timeout(struct net_device *dev, unsigned int txqueue)
1098 struct netdev_private *np = netdev_priv(dev);
1099 void __iomem *ioaddr = np->base;
1102 printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1103 "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1105 /* Perhaps we should reinitialize the hardware here. */
1108 * Stop and restart the interface.
1109 * Cheat and increase the debug level temporarily.
1117 /* Trigger an immediate transmit demand. */
1119 netif_trans_update(dev); /* prevent tx timeout */
1120 dev->stats.tx_errors++;
1121 netif_wake_queue(dev);
1125 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1126 static void init_ring(struct net_device *dev)
1128 struct netdev_private *np = netdev_priv(dev);
1131 np->cur_rx = np->cur_tx = np->reap_tx = 0;
1132 np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1134 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
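/* mtu + 32 presumably leaves slack for link-level header and trailer bytes
   (e.g. a VLAN tag and the FCS) beyond the MTU itself. */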
1136 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1137 for (i = 0; i < RX_RING_SIZE; i++) {
1138 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1139 np->rx_info[i].skb = skb;
1142 np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
1146 if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
1148 np->rx_info[i].skb = NULL;
1151 /* Grrr, we cannot offset to correctly align the IP header. */
1152 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1154 writew(i - 1, np->base + RxDescQIdx);
1155 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1157 /* Clear the remainder of the Rx buffer ring. */
1158 for ( ; i < RX_RING_SIZE; i++) {
1159 np->rx_ring[i].rxaddr = 0;
1160 np->rx_info[i].skb = NULL;
1161 np->rx_info[i].mapping = 0;
1163 /* Mark the last entry as wrapping the ring. */
1164 np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1166 /* Clear the completion rings. */
1167 for (i = 0; i < DONE_Q_SIZE; i++) {
1168 np->rx_done_q[i].status = 0;
1169 np->tx_done_q[i].status = 0;
1172 for (i = 0; i < TX_RING_SIZE; i++)
1173 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1177 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1179 struct netdev_private *np = netdev_priv(dev);
1181 unsigned int prev_tx;
1186 * be cautious here, wrapping the queue has weird semantics
1187 * and we may not have enough slots even when it seems we do.
1189 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1190 netif_stop_queue(dev);
1191 return NETDEV_TX_BUSY;
1194 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1195 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1196 if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1197 return NETDEV_TX_OK;
1199 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1201 prev_tx = np->cur_tx;
1202 entry = np->cur_tx % TX_RING_SIZE;
1203 for (i = 0; i < skb_num_frags(skb); i++) {
1208 np->tx_info[entry].skb = skb;
1210 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1211 status |= TxRingWrap;
1215 status |= TxDescIntr;
1218 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1220 dev->stats.tx_compressed++;
1222 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1224 np->tx_info[entry].mapping =
1225 dma_map_single(&np->pci_dev->dev, skb->data,
1226 skb_first_frag_len(skb),
1229 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1230 status |= skb_frag_size(this_frag);
1231 np->tx_info[entry].mapping =
1232 dma_map_single(&np->pci_dev->dev,
1233 skb_frag_address(this_frag),
1234 skb_frag_size(this_frag),
1237 if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
1238 dev->stats.tx_dropped++;
1242 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1243 np->tx_ring[entry].status = cpu_to_le32(status);
1245 printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1246 dev->name, np->cur_tx, np->dirty_tx,
1249 np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1250 np->cur_tx += np->tx_info[entry].used_slots;
1253 np->tx_info[entry].used_slots = 1;
1254 np->cur_tx += np->tx_info[entry].used_slots;
1257 /* scavenge the tx descriptors twice per TX_RING_SIZE */
1258 if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1262 /* Non-x86: explicitly flush descriptor cache lines here. */
1263 /* Ensure all descriptors are written back before the transmit is initiated. */
1267 /* Update the producer index. */
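/* The hardware counts Tx descriptors in 8-byte units, hence the scaling
   by sizeof(starfire_tx_desc) / 8. */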
1268 writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1270 /* 4 is arbitrary, but should be ok */
1271 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1272 netif_stop_queue(dev);
1274 return NETDEV_TX_OK;
1277 entry = prev_tx % TX_RING_SIZE;
1278 np->tx_info[entry].skb = NULL;
1280 dma_unmap_single(&np->pci_dev->dev,
1281 np->tx_info[entry].mapping,
1282 skb_first_frag_len(skb), DMA_TO_DEVICE);
1283 np->tx_info[entry].mapping = 0;
1284 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1285 for (j = 1; j < i; j++) {
1286 dma_unmap_single(&np->pci_dev->dev,
1287 np->tx_info[entry].mapping,
1288 skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
1293 dev_kfree_skb_any(skb);
1294 np->cur_tx = prev_tx;
1295 return NETDEV_TX_OK;
1298 /* The interrupt handler does all of the Rx thread work and cleans up
1299 after the Tx thread. */
1300 static irqreturn_t intr_handler(int irq, void *dev_instance)
1302 struct net_device *dev = dev_instance;
1303 struct netdev_private *np = netdev_priv(dev);
1304 void __iomem *ioaddr = np->base;
1305 int boguscnt = max_interrupt_work;
1311 u32 intr_status = readl(ioaddr + IntrClear);
1314 printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1315 dev->name, intr_status);
1317 if (intr_status == 0 || intr_status == (u32) -1)
1322 if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1325 if (likely(napi_schedule_prep(&np->napi))) {
1326 __napi_schedule(&np->napi);
1327 enable = readl(ioaddr + IntrEnable);
1328 enable &= ~(IntrRxDone | IntrRxEmpty);
1329 writel(enable, ioaddr + IntrEnable);
1330 /* flush PCI posting buffers */
1331 readl(ioaddr + IntrEnable);
1333 /* Paranoia check */
1334 enable = readl(ioaddr + IntrEnable);
1335 if (enable & (IntrRxDone | IntrRxEmpty)) {
1337 "%s: interrupt while in poll!\n",
1339 enable &= ~(IntrRxDone | IntrRxEmpty);
1340 writel(enable, ioaddr + IntrEnable);
1345 /* Scavenge the skbuff list based on the Tx-done queue.
1346 There are redundant checks here that may be cleaned up
1347 after the driver has proven to be reliable. */
1348 consumer = readl(ioaddr + TxConsumerIdx);
1350 printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1351 dev->name, consumer);
1353 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1355 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1356 dev->name, np->dirty_tx, np->tx_done, tx_status);
1357 if ((tx_status & 0xe0000000) == 0xa0000000) {
1358 dev->stats.tx_packets++;
1359 } else if ((tx_status & 0xe0000000) == 0x80000000) {
1360 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1361 struct sk_buff *skb = np->tx_info[entry].skb;
1362 np->tx_info[entry].skb = NULL;
1363 dma_unmap_single(&np->pci_dev->dev,
1364 np->tx_info[entry].mapping,
1365 skb_first_frag_len(skb),
1367 np->tx_info[entry].mapping = 0;
1368 np->dirty_tx += np->tx_info[entry].used_slots;
1369 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1372 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1373 dma_unmap_single(&np->pci_dev->dev,
1374 np->tx_info[entry].mapping,
1375 skb_frag_size(&skb_shinfo(skb)->frags[i]),
1382 dev_consume_skb_irq(skb);
1384 np->tx_done_q[np->tx_done].status = 0;
1385 np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1387 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
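/* The Tx half of the completion-queue consumer index lives in the high
   16 bits of the register, hence the +2 byte offset in the writew above;
   __netdev_rx() updates the Rx half at offset 0. */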
1389 if (netif_queue_stopped(dev) &&
1390 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1391 /* The ring is no longer full, wake the queue. */
1392 netif_wake_queue(dev);
1395 /* Stats overflow */
1396 if (intr_status & IntrStatsMax)
1399 /* Media change interrupt. */
1400 if (intr_status & IntrLinkChange)
1401 netdev_media_change(dev);
1403 /* Abnormal error summary / uncommon event handling. */
1404 if (intr_status & IntrAbnormalSummary)
1405 netdev_error(dev, intr_status);
1407 if (--boguscnt < 0) {
1409 printk(KERN_WARNING "%s: Too much work at interrupt, "
1411 dev->name, intr_status);
1417 printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1418 dev->name, (int) readl(ioaddr + IntrStatus));
1419 return IRQ_RETVAL(handled);
1424 * This routine is logically part of the interrupt/poll handler, but separated
1425 * for clarity and better register allocation.
1427 static int __netdev_rx(struct net_device *dev, int *quota)
1429 struct netdev_private *np = netdev_priv(dev);
1433 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1434 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1435 struct sk_buff *skb;
1438 rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1441 printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1442 if (!(desc_status & RxOK)) {
1443 /* There was an error. */
1445 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1446 dev->stats.rx_errors++;
1447 if (desc_status & RxFIFOErr)
1448 dev->stats.rx_fifo_errors++;
1452 if (*quota <= 0) { /* out of rx quota */
1458 pkt_len = desc_status; /* implicitly truncated to the low 16 bits (the frame length) */
1459 entry = (desc_status >> 16) & 0x7ff;
1462 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1463 /* Check if the packet is long enough to accept without copying
1464 to a minimally-sized skbuff. */
1465 if (pkt_len < rx_copybreak &&
1466 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1467 skb_reserve(skb, 2); /* 16 byte align the IP header */
1468 dma_sync_single_for_cpu(&np->pci_dev->dev,
1469 np->rx_info[entry].mapping,
1470 pkt_len, DMA_FROM_DEVICE);
1471 skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1472 dma_sync_single_for_device(&np->pci_dev->dev,
1473 np->rx_info[entry].mapping,
1474 pkt_len, DMA_FROM_DEVICE);
1475 skb_put(skb, pkt_len);
1477 dma_unmap_single(&np->pci_dev->dev,
1478 np->rx_info[entry].mapping,
1479 np->rx_buf_sz, DMA_FROM_DEVICE);
1480 skb = np->rx_info[entry].skb;
1481 skb_put(skb, pkt_len);
1482 np->rx_info[entry].skb = NULL;
1483 np->rx_info[entry].mapping = 0;
1485 #ifndef final_version /* Remove after testing. */
1486 /* You will want this info for the initial debug. */
1488 printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n",
1489 skb->data, skb->data + 6,
1490 skb->data[12], skb->data[13]);
1494 skb->protocol = eth_type_trans(skb, dev);
1497 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1499 if (le16_to_cpu(desc->status2) & 0x0100) {
1500 skb->ip_summed = CHECKSUM_UNNECESSARY;
1501 dev->stats.rx_compressed++;
1504 * This feature doesn't seem to be working, at least
1505 * with the two firmware versions I have. If the GFP sees
1506 * an IP fragment, it either ignores it completely, or reports
1507 * "bad checksum" on it.
1509 * Maybe I missed something -- corrections are welcome.
1510 * Until then, the printk stays. :-) -Ion
1512 else if (le16_to_cpu(desc->status2) & 0x0040) {
1513 skb->ip_summed = CHECKSUM_COMPLETE;
1514 skb->csum = le16_to_cpu(desc->csum);
1515 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1518 if (le16_to_cpu(desc->status2) & 0x0200) {
1519 u16 vlid = le16_to_cpu(desc->vlanid);
1522 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
1525 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
1527 #endif /* VLAN_SUPPORT */
1528 netif_receive_skb(skb);
1529 dev->stats.rx_packets++;
1534 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1537 if (*quota == 0) { /* out of rx quota */
1541 writew(np->rx_done, np->base + CompletionQConsumerIdx);
1544 refill_rx_ring(dev);
1546 printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1547 retcode, np->rx_done, desc_status);
1551 static int netdev_poll(struct napi_struct *napi, int budget)
1553 struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1554 struct net_device *dev = np->dev;
1556 void __iomem *ioaddr = np->base;
1560 writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1562 if (__netdev_rx(dev, "a))
1565 intr_status = readl(ioaddr + IntrStatus);
1566 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1568 napi_complete(napi);
1569 intr_status = readl(ioaddr + IntrEnable);
1570 intr_status |= IntrRxDone | IntrRxEmpty;
1571 writel(intr_status, ioaddr + IntrEnable);
1575 printk(KERN_DEBUG " exiting netdev_poll(): %d.\n",
1578 /* Restart Rx engine if stopped. */
1579 return budget - quota;
1582 static void refill_rx_ring(struct net_device *dev)
1584 struct netdev_private *np = netdev_priv(dev);
1585 struct sk_buff *skb;
1588 /* Refill the Rx ring buffers. */
1589 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1590 entry = np->dirty_rx % RX_RING_SIZE;
1591 if (np->rx_info[entry].skb == NULL) {
1592 skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1593 np->rx_info[entry].skb = skb;
1595 break; /* Better luck next round. */
1596 np->rx_info[entry].mapping =
1597 dma_map_single(&np->pci_dev->dev, skb->data,
1598 np->rx_buf_sz, DMA_FROM_DEVICE);
1599 if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
1601 np->rx_info[entry].skb = NULL;
1604 np->rx_ring[entry].rxaddr =
1605 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1607 if (entry == RX_RING_SIZE - 1)
1608 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1611 writew(entry, np->base + RxDescQIdx);
1615 static void netdev_media_change(struct net_device *dev)
1617 struct netdev_private *np = netdev_priv(dev);
1618 void __iomem *ioaddr = np->base;
1619 u16 reg0, reg1, reg4, reg5;
1621 u32 new_intr_timer_ctrl;
1623 /* reset status first */
1624 mdio_read(dev, np->phys[0], MII_BMCR);
1625 mdio_read(dev, np->phys[0], MII_BMSR);
1627 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1628 reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1630 if (reg1 & BMSR_LSTATUS) {
1632 if (reg0 & BMCR_ANENABLE) {
1633 /* autonegotiation is enabled */
1634 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1635 reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1636 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1638 np->mii_if.full_duplex = 1;
1639 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1641 np->mii_if.full_duplex = 0;
1642 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1644 np->mii_if.full_duplex = 1;
1647 np->mii_if.full_duplex = 0;
1650 /* autonegotiation is disabled */
1651 if (reg0 & BMCR_SPEED100)
1655 if (reg0 & BMCR_FULLDPLX)
1656 np->mii_if.full_duplex = 1;
1658 np->mii_if.full_duplex = 0;
1660 netif_carrier_on(dev);
1661 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1663 np->speed100 ? "100" : "10",
1664 np->mii_if.full_duplex ? "full" : "half");
1666 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1667 if (np->mii_if.full_duplex)
1668 new_tx_mode |= FullDuplex;
1669 if (np->tx_mode != new_tx_mode) {
1670 np->tx_mode = new_tx_mode;
1671 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1673 writel(np->tx_mode, ioaddr + TxMode);
1676 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1678 new_intr_timer_ctrl |= Timer10X;
1679 if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1680 np->intr_timer_ctrl = new_intr_timer_ctrl;
1681 writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1684 netif_carrier_off(dev);
1685 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1690 static void netdev_error(struct net_device *dev, int intr_status)
1692 struct netdev_private *np = netdev_priv(dev);
1694 /* Came close to underrunning the Tx FIFO, increase threshold. */
1695 if (intr_status & IntrTxDataLow) {
1696 if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1697 writel(++np->tx_threshold, np->base + TxThreshold);
1698 printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1699 dev->name, np->tx_threshold * 16);
1701 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1703 if (intr_status & IntrRxGFPDead) {
1704 dev->stats.rx_fifo_errors++;
1705 dev->stats.rx_errors++;
1707 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1708 dev->stats.tx_fifo_errors++;
1709 dev->stats.tx_errors++;
1711 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1712 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1713 dev->name, intr_status);
1717 static struct net_device_stats *get_stats(struct net_device *dev)
1719 struct netdev_private *np = netdev_priv(dev);
1720 void __iomem *ioaddr = np->base;
1722 /* This adapter architecture needs no SMP locks. */
1723 dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1724 dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1725 dev->stats.tx_packets = readl(ioaddr + 0x57000);
1726 dev->stats.tx_aborted_errors =
1727 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1728 dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1729 dev->stats.collisions =
1730 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1732 /* The chip need only report frames it silently dropped. */
1733 dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1734 writew(0, ioaddr + RxDMAStatus);
1735 dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1736 dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1737 dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1738 dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1744 static u32 set_vlan_mode(struct netdev_private *np)
1748 void __iomem *filter_addr = np->base + HashTable + 8;
1751 for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
1752 if (vlan_count == 32)
1754 writew(vid, filter_addr);
1758 if (vlan_count == 32) {
1759 ret |= PerfectFilterVlan;
1760 while (vlan_count < 32) {
1761 writew(0, filter_addr);
1768 #endif /* VLAN_SUPPORT */
1770 static void set_rx_mode(struct net_device *dev)
1772 struct netdev_private *np = netdev_priv(dev);
1773 void __iomem *ioaddr = np->base;
1774 u32 rx_mode = MinVLANPrio;
1775 struct netdev_hw_addr *ha;
1779 rx_mode |= set_vlan_mode(np);
1780 #endif /* VLAN_SUPPORT */
1782 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1783 rx_mode |= AcceptAll;
1784 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1785 (dev->flags & IFF_ALLMULTI)) {
1786 /* Too many to match, or accept all multicasts. */
1787 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1788 } else if (netdev_mc_count(dev) <= 14) {
1789 /* Use the 16 element perfect filter, skip first two entries. */
1790 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1791 const __be16 *eaddrs;
1792 netdev_for_each_mc_addr(ha, dev) {
1793 eaddrs = (__be16 *) ha->addr;
1794 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1795 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1796 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1798 eaddrs = (const __be16 *)dev->dev_addr;
1799 i = netdev_mc_count(dev) + 2;
1801 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1802 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1803 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1805 rx_mode |= AcceptBroadcast|PerfectFilter;
1807 /* Must use a multicast hash table. */
1808 void __iomem *filter_addr;
1809 const __be16 *eaddrs;
1810 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1812 memset(mc_filter, 0, sizeof(mc_filter));
1813 netdev_for_each_mc_addr(ha, dev) {
1814 /* The chip uses the upper 9 CRC bits
1815 as index into the hash table */
1816 int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1817 __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1819 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
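/* Example: bit_nr = 291 sets bit 291 & 31 = 3 in the 32-bit word spanning
   mc_filter[18] and mc_filter[19]. */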
1821 /* Fill the remaining perfect filter entries (2-15) with the station address, so they never match multicast traffic. */
1822 filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1823 eaddrs = (const __be16 *)dev->dev_addr;
1824 for (i = 2; i < 16; i++) {
1825 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1826 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1827 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1829 for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
1830 writew(mc_filter[i], filter_addr);
1831 rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1833 writel(rx_mode, ioaddr + RxFilterMode);
1836 static int check_if_running(struct net_device *dev)
1838 if (!netif_running(dev))
1843 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1845 struct netdev_private *np = netdev_priv(dev);
1846 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1847 strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1850 static int get_link_ksettings(struct net_device *dev,
1851 struct ethtool_link_ksettings *cmd)
1853 struct netdev_private *np = netdev_priv(dev);
1854 spin_lock_irq(&np->lock);
1855 mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1856 spin_unlock_irq(&np->lock);
1860 static int set_link_ksettings(struct net_device *dev,
1861 const struct ethtool_link_ksettings *cmd)
1863 struct netdev_private *np = netdev_priv(dev);
1865 spin_lock_irq(&np->lock);
1866 res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1867 spin_unlock_irq(&np->lock);
1872 static int nway_reset(struct net_device *dev)
1874 struct netdev_private *np = netdev_priv(dev);
1875 return mii_nway_restart(&np->mii_if);
1878 static u32 get_link(struct net_device *dev)
1880 struct netdev_private *np = netdev_priv(dev);
1881 return mii_link_ok(&np->mii_if);
1884 static u32 get_msglevel(struct net_device *dev)
1889 static void set_msglevel(struct net_device *dev, u32 val)
1894 static const struct ethtool_ops ethtool_ops = {
1895 .begin = check_if_running,
1896 .get_drvinfo = get_drvinfo,
1897 .nway_reset = nway_reset,
1898 .get_link = get_link,
1899 .get_msglevel = get_msglevel,
1900 .set_msglevel = set_msglevel,
1901 .get_link_ksettings = get_link_ksettings,
1902 .set_link_ksettings = set_link_ksettings,
1905 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1907 struct netdev_private *np = netdev_priv(dev);
1908 struct mii_ioctl_data *data = if_mii(rq);
1911 if (!netif_running(dev))
1914 spin_lock_irq(&np->lock);
1915 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1916 spin_unlock_irq(&np->lock);
1918 if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1924 static int netdev_close(struct net_device *dev)
1926 struct netdev_private *np = netdev_priv(dev);
1927 void __iomem *ioaddr = np->base;
1930 netif_stop_queue(dev);
1932 napi_disable(&np->napi);
1935 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1936 dev->name, (int) readl(ioaddr + IntrStatus));
1937 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1938 dev->name, np->cur_tx, np->dirty_tx,
1939 np->cur_rx, np->dirty_rx);
1942 /* Disable interrupts by clearing the interrupt mask. */
1943 writel(0, ioaddr + IntrEnable);
1945 /* Stop the chip's Tx and Rx processes. */
1946 writel(0, ioaddr + GenCtrl);
1947 readl(ioaddr + GenCtrl);
1950 printk(KERN_DEBUG" Tx ring at %#llx:\n",
1951 (long long) np->tx_ring_dma);
1952 for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1953 printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1954 i, le32_to_cpu(np->tx_ring[i].status),
1955 (long long) dma_to_cpu(np->tx_ring[i].addr),
1956 le32_to_cpu(np->tx_done_q[i].status));
1957 printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
1958 (long long) np->rx_ring_dma, np->rx_done_q);
1960 for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1961 printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1962 i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1966 free_irq(np->pci_dev->irq, dev);
1968 /* Free all the skbuffs in the Rx queue. */
1969 for (i = 0; i < RX_RING_SIZE; i++) {
1970 np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1971 if (np->rx_info[i].skb != NULL) {
1972 dma_unmap_single(&np->pci_dev->dev,
1973 np->rx_info[i].mapping,
1974 np->rx_buf_sz, DMA_FROM_DEVICE);
1975 dev_kfree_skb(np->rx_info[i].skb);
1977 np->rx_info[i].skb = NULL;
1978 np->rx_info[i].mapping = 0;
1980 for (i = 0; i < TX_RING_SIZE; i++) {
1981 struct sk_buff *skb = np->tx_info[i].skb;
1984 dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
1985 skb_first_frag_len(skb), DMA_TO_DEVICE);
1986 np->tx_info[i].mapping = 0;
1988 np->tx_info[i].skb = NULL;
1994 static int __maybe_unused starfire_suspend(struct device *dev_d)
1996 struct net_device *dev = dev_get_drvdata(dev_d);
1998 if (netif_running(dev)) {
1999 netif_device_detach(dev);
2006 static int __maybe_unused starfire_resume(struct device *dev_d)
2008 struct net_device *dev = dev_get_drvdata(dev_d);
2010 if (netif_running(dev)) {
2012 netif_device_attach(dev);
2018 static void starfire_remove_one(struct pci_dev *pdev)
2020 struct net_device *dev = pci_get_drvdata(pdev);
2021 struct netdev_private *np = netdev_priv(dev);
2025 unregister_netdev(dev);
2028 dma_free_coherent(&pdev->dev, np->queue_mem_size,
2029 np->queue_mem, np->queue_mem_dma);
2032 /* XXX: add wakeup code -- requires firmware for MagicPacket */
2033 pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
2034 pci_disable_device(pdev);
2037 pci_release_regions(pdev);
2039 free_netdev(dev); /* Will also free np!! */
2042 static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);
2044 static struct pci_driver starfire_driver = {
2046 .probe = starfire_init_one,
2047 .remove = starfire_remove_one,
2048 .driver.pm = &starfire_pm_ops,
2049 .id_table = starfire_pci_tbl,
2053 static int __init starfire_init (void)
2055 /* when a module, this is printed whether or not devices are found in probe */
2057 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2060 BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2062 return pci_register_driver(&starfire_driver);
2066 static void __exit starfire_cleanup (void)
2068 pci_unregister_driver (&starfire_driver);
2072 module_init(starfire_init);
2073 module_exit(starfire_cleanup);