2 * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux.
4 * Based on skeleton.c by Donald Becker.
6 * This driver is a replacement for an older, less maintained version.
7 * This is the header of the older version:
9 * Copyright 2001 MontaVista Software Inc.
10 * Author: MontaVista Software, Inc.
11 * ahennessy@mvista.com
12 * Copyright (C) 2000-2001 Toshiba Corporation
13 * static const char *version =
14 * "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n";
17 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file "COPYING" in the main directory of this archive
21 * (C) Copyright TOSHIBA CORPORATION 2004-2005
22 * All Rights Reserved.
25 #define DRV_VERSION "1.39"
26 static const char *version = "tc35815.c:v" DRV_VERSION "\n";
27 #define MODNAME "tc35815"
29 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/types.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/ioport.h>
36 #include <linux/if_vlan.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/spinlock.h>
40 #include <linux/errno.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/skbuff.h>
44 #include <linux/delay.h>
45 #include <linux/pci.h>
46 #include <linux/phy.h>
47 #include <linux/workqueue.h>
48 #include <linux/platform_device.h>
49 #include <linux/prefetch.h>
51 #include <asm/byteorder.h>
53 enum tc35815_chiptype {
59 /* indexed by tc35815_chiptype, above */
63 { "TOSHIBA TC35815CF 10/100BaseTX" },
64 { "TOSHIBA TC35815 with Wake on LAN" },
65 { "TOSHIBA TC35815/TX4939" },
68 static const struct pci_device_id tc35815_pci_tbl[] = {
69 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
70 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
71 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
74 MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);
76 /* see MODULE_PARM_DESC */
77 static struct tc35815_options {
86 __u32 DMA_Ctl; /* 0x00 */
94 __u32 FDA_Lim; /* 0x20 */
101 __u32 MAC_Ctl; /* 0x40 */
109 __u32 CAM_Adr; /* 0x60 */
122 /* DMA_Ctl bit assign ------------------------------------------------------- */
123 #define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */
124 #define DMA_RxAlign_1 0x00400000
125 #define DMA_RxAlign_2 0x00800000
126 #define DMA_RxAlign_3 0x00c00000
127 #define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */
128 #define DMA_IntMask 0x00040000 /* 1:Interrupt mask */
129 #define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */
130 #define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */
131 #define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */
132 #define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */
133 #define DMA_TestMode 0x00002000 /* 1:Test Mode */
134 #define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */
135 #define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */
137 /* RxFragSize bit assign ---------------------------------------------------- */
138 #define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */
139 #define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */
141 /* MAC_Ctl bit assign ------------------------------------------------------- */
142 #define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */
143 #define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */
144 #define MAC_MissRoll 0x00000400 /* 1:Missed Roll */
145 #define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */
146 #define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */
147 #define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/
148 #define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */
149 #define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */
150 #define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */
151 #define MAC_Reset 0x00000004 /* 1:Software Reset */
152 #define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */
153 #define MAC_HaltReq 0x00000001 /* 1:Halt request */
155 /* PROM_Ctl bit assign ------------------------------------------------------ */
156 #define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */
157 #define PROM_Read 0x00004000 /*10:Read operation */
158 #define PROM_Write 0x00002000 /*01:Write operation */
159 #define PROM_Erase 0x00006000 /*11:Erase operation */
160 /*00:Enable or Disable Writing, */
161 /* as specified in PROM_Addr. */
162 #define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */
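/*
 * Illustrative sketch (not additional register bits): tc35815_init_dev_addr()
 * below reads the station address from the serial PROM one 16-bit word at a
 * time, starting at PROM word offset 2, roughly:
 *
 *	tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
 *	while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
 *		;
 *	data = tc_readl(&tr->PROM_Data);
 *
 * and stores the low byte of each word before the high byte.
 */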
165 /* CAM_Ctl bit assign ------------------------------------------------------- */
166 #define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */
167 #define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/
169 #define CAM_BroadAcc 0x00000004 /* 1:Broadcast accept */
170 #define CAM_GroupAcc 0x00000002 /* 1:Multicast accept */
171 #define CAM_StationAcc 0x00000001 /* 1:unicast accept */
173 /* CAM_Ena bit assign ------------------------------------------------------- */
174 #define CAM_ENTRY_MAX 21 /* CAM Data entry max count */
175 #define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
176 #define CAM_Ena_Bit(index) (1 << (index))
177 #define CAM_ENTRY_DESTINATION 0
178 #define CAM_ENTRY_SOURCE 1
179 #define CAM_ENTRY_MACCTL 20
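/*
 * Usage note: the CAM holds up to CAM_ENTRY_MAX six-byte address entries,
 * enabled individually through CAM_Ena.  Entries 0, 1 and 20 are reserved
 * (destination, source/station and MAC control addresses), so
 * tc35815_set_multicast_list() can program at most CAM_ENTRY_MAX - 3 exact
 * multicast addresses, into entries 2..19.
 */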
181 /* Tx_Ctl bit assign -------------------------------------------------------- */
182 #define Tx_En 0x00000001 /* 1:Transmit enable */
183 #define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */
184 #define Tx_NoPad 0x00000004 /* 1:Suppress Padding */
185 #define Tx_NoCRC 0x00000008 /* 1:Suppress CRC */
186 #define Tx_FBack 0x00000010 /* 1:Fast Back-off */
187 #define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */
188 #define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */
189 #define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */
190 #define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */
191 #define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */
192 #define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */
193 #define Tx_EnComp 0x00004000 /* 1:Enable Completion */
195 /* Tx_Stat bit assign ------------------------------------------------------- */
196 #define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */
197 #define Tx_ExColl 0x00000010 /* Excessive Collision */
198 #define Tx_TXDefer 0x00000020 /* Transmit Deferred */
199 #define Tx_Paused 0x00000040 /* Transmit Paused */
200 #define Tx_IntTx 0x00000080 /* Interrupt on Tx */
201 #define Tx_Under 0x00000100 /* Underrun */
202 #define Tx_Defer 0x00000200 /* Deferral */
203 #define Tx_NCarr 0x00000400 /* No Carrier */
204 #define Tx_10Stat 0x00000800 /* 10Mbps Status */
205 #define Tx_LateColl 0x00001000 /* Late Collision */
206 #define Tx_TxPar 0x00002000 /* Tx Parity Error */
207 #define Tx_Comp 0x00004000 /* Completion */
208 #define Tx_Halted 0x00008000 /* Tx Halted */
209 #define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */
211 /* Rx_Ctl bit assign -------------------------------------------------------- */
212 #define Rx_EnGood 0x00004000 /* 1:Enable Good */
213 #define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */
214 #define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */
215 #define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */
216 #define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */
217 #define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */
218 #define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */
219 #define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */
220 #define Rx_ShortEn 0x00000008 /* 1:Short Enable */
221 #define Rx_LongEn 0x00000004 /* 1:Long Enable */
222 #define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */
223 #define Rx_RxEn 0x00000001 /* 1:Receive Enable */
225 /* Rx_Stat bit assign ------------------------------------------------------- */
226 #define Rx_Halted 0x00008000 /* Rx Halted */
227 #define Rx_Good 0x00004000 /* Rx Good */
228 #define Rx_RxPar 0x00002000 /* Rx Parity Error */
229 #define Rx_TypePkt 0x00001000 /* Rx Type Packet */
230 #define Rx_LongErr 0x00000800 /* Rx Long Error */
231 #define Rx_Over 0x00000400 /* Rx Overflow */
232 #define Rx_CRCErr 0x00000200 /* Rx CRC Error */
233 #define Rx_Align 0x00000100 /* Rx Alignment Error */
234 #define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */
235 #define Rx_IntRx 0x00000040 /* Rx Interrupt */
236 #define Rx_CtlRecd 0x00000020 /* Rx Control Receive */
237 #define Rx_InLenErr 0x00000010 /* Rx In Range Frame Length Error */
239 #define Rx_Stat_Mask 0x0000FFF0 /* Rx All Status Mask */
241 /* Int_En bit assign -------------------------------------------------------- */
242 #define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
243 #define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */
244 #define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */
245 #define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */
246 #define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */
247 #define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */
248 #define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */
249 #define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */
250 #define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */
251 #define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */
252 #define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */
253 #define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */
254 /* Exhausted Enable */
256 /* Int_Src bit assign ------------------------------------------------------- */
257 #define Int_NRabt 0x00004000 /* 1:Non Recoverable error */
258 #define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */
259 #define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */
260 #define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */
261 #define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */
262 #define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */
263 #define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */
264 #define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */
265 #define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */
266 #define Int_SWInt 0x00000020 /* 1:Software request & Clear */
267 #define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */
268 #define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */
269 #define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */
270 #define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */
271 #define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */
273 /* MD_CA bit assign --------------------------------------------------------- */
274 #define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */
275 #define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */
276 #define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */
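/*
 * MII management sketch (see tc_mdio_read()/tc_mdio_write() below): the PHY
 * address occupies MD_CA bits 9:5 and the register number bits 4:0, combined
 * with MD_CA_Busy (plus MD_CA_Wr for a write).  A read is roughly:
 *
 *	tc_writel(MD_CA_Busy | (phy_addr << 5) | (regnum & 0x1f), &tr->MD_CA);
 *	while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
 *		;
 *	val = tc_readl(&tr->MD_Data) & 0xffff;
 */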
283 /* Frame descriptor */
285 volatile __u32 FDNext;
286 volatile __u32 FDSystem;
287 volatile __u32 FDStat;
288 volatile __u32 FDCtl;
291 /* Buffer descriptor */
293 volatile __u32 BuffData;
294 volatile __u32 BDCtl;
299 /* Frame Descriptor bit assign ---------------------------------------------- */
300 #define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
301 #define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
302 #define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
303 #define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */
304 #define FD_FrmOpt_IntTx 0x20000000 /* Tx only */
305 #define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */
306 #define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */
307 #define FD_FrmOpt_Packing 0x04000000 /* Rx only */
308 #define FD_CownsFD 0x80000000 /* FD Controller owner bit */
309 #define FD_Next_EOL 0x00000001 /* FD EOL indicator */
310 #define FD_BDCnt_SHIFT 16
312 /* Buffer Descriptor bit assign --------------------------------------------- */
313 #define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
314 #define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
315 #define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
316 #define BD_CownsBD 0x80000000 /* BD Controller owner bit */
317 #define BD_RxBDID_SHIFT 16
318 #define BD_RxBDSeqN_SHIFT 24
321 /* Some useful constants. */
323 #define TX_CTL_CMD (Tx_EnTxPar | Tx_EnLateColl | \
324 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
325 Tx_En) /* maybe 0x7b01 */
326 /* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
327 #define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
328 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
329 #define INT_EN_CMD (Int_NRAbtEn | \
330 Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \
331 Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \
333 Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/
334 #define DMA_CTL_CMD DMA_BURST_SIZE
335 #define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF)
337 /* Tuning parameters */
338 #define DMA_BURST_SIZE 32
339 #define TX_THRESHOLD 1024
340 /* threshold (max packet size) used instead on hosts with low PCI transfer ability. */
341 #define TX_THRESHOLD_MAX 1536
342 /* switch to the maximum threshold once underrun errors have occurred this many times. */
343 #define TX_THRESHOLD_KEEP_LIMIT 10
345 /* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
346 #define FD_PAGE_NUM 4
347 #define RX_BUF_NUM 128 /* < 256 */
348 #define RX_FD_NUM 256 /* >= 32 */
349 #define TX_FD_NUM 128
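/*
 * Worked example of the constraint above, assuming a 4 KiB PAGE_SIZE:
 * 16 + 128 * 8 + 256 * 16 + 128 * 32 = 9232 bytes, which fits within
 * FD_PAGE_NUM * PAGE_SIZE = 4 * 4096 = 16384 bytes.
 */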
350 #if RX_CTL_CMD & Rx_LongEn
351 #define RX_BUF_SIZE PAGE_SIZE
352 #elif RX_CTL_CMD & Rx_StripCRC
353 #define RX_BUF_SIZE \
354 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN)
356 #define RX_BUF_SIZE \
357 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
359 #define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */
360 #define NAPI_WEIGHT 16
370 struct BDesc bd[0]; /* variable length */
375 struct BDesc bd[RX_BUF_NUM];
379 #define tc_readl(addr) ioread32(addr)
380 #define tc_writel(d, addr) iowrite32(d, addr)
382 #define TC35815_TX_TIMEOUT msecs_to_jiffies(400)
384 /* Information that need to be kept for each controller. */
385 struct tc35815_local {
386 struct pci_dev *pci_dev;
388 struct net_device *dev;
389 struct napi_struct napi;
399 /* Tx control lock. This protects the transmit buffer ring
400 * state along with the "tx full" state of the driver. This
401 * means all netif_queue flow control actions are protected
402 * by this lock as well.
407 struct mii_bus *mii_bus;
408 struct phy_device *phy_dev;
412 struct work_struct restart_work;
415 * Transmitting: Batch Mode.
417 * Receiving: Non-Packing Mode.
418 * 1 circular FD for Free Buffer List.
419 * RX_BUF_NUM BD in Free Buffer FD.
420 * One Free Buffer BD has ETH_FRAME_LEN data buffer.
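 *
 * The fd_buf area set up by tc35815_init_queues() is laid out as the RxFD
 * ring first, then the TxFD ring, then a single FrFD holding the RX_BUF_NUM
 * free-buffer BDs.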
422 void *fd_buf; /* for TxFD, RxFD, FrFD */
423 dma_addr_t fd_buf_dma;
424 struct TxFD *tfd_base;
425 unsigned int tfd_start;
426 unsigned int tfd_end;
427 struct RxFD *rfd_base;
428 struct RxFD *rfd_limit;
429 struct RxFD *rfd_cur;
430 struct FrFD *fbl_ptr;
431 unsigned int fbl_count;
435 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
437 enum tc35815_chiptype chiptype;
440 static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
442 return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf);
445 static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
447 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
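/*
 * Note: these helpers are only valid for pointers inside the fd_buf coherent
 * DMA block (the TxFD/RxFD/FrFD descriptors).  Packet data buffers are
 * streaming-mapped separately with pci_map_single() and must not be passed
 * through them.
 */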
450 static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
451 struct pci_dev *hwdev,
452 dma_addr_t *dma_handle)
455 skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
458 *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
460 if (pci_dma_mapping_error(hwdev, *dma_handle)) {
461 dev_kfree_skb_any(skb);
464 skb_reserve(skb, 2); /* make IP header 4-byte aligned */
468 static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
470 pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
472 dev_kfree_skb_any(skb);
475 /* Index to functions, as function prototypes. */
477 static int tc35815_open(struct net_device *dev);
478 static netdev_tx_t tc35815_send_packet(struct sk_buff *skb,
479 struct net_device *dev);
480 static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
481 static int tc35815_rx(struct net_device *dev, int limit);
482 static int tc35815_poll(struct napi_struct *napi, int budget);
483 static void tc35815_txdone(struct net_device *dev);
484 static int tc35815_close(struct net_device *dev);
485 static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
486 static void tc35815_set_multicast_list(struct net_device *dev);
487 static void tc35815_tx_timeout(struct net_device *dev);
488 static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
489 #ifdef CONFIG_NET_POLL_CONTROLLER
490 static void tc35815_poll_controller(struct net_device *dev);
492 static const struct ethtool_ops tc35815_ethtool_ops;
494 /* Example routines you must write ;->. */
495 static void tc35815_chip_reset(struct net_device *dev);
496 static void tc35815_chip_init(struct net_device *dev);
499 static void panic_queues(struct net_device *dev);
502 static void tc35815_restart_work(struct work_struct *work);
504 static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
506 struct net_device *dev = bus->priv;
507 struct tc35815_regs __iomem *tr =
508 (struct tc35815_regs __iomem *)dev->base_addr;
509 unsigned long timeout = jiffies + HZ;
511 tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA);
512 udelay(12); /* it takes 32 x 400ns at least */
513 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
514 if (time_after(jiffies, timeout))
518 return tc_readl(&tr->MD_Data) & 0xffff;
521 static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
523 struct net_device *dev = bus->priv;
524 struct tc35815_regs __iomem *tr =
525 (struct tc35815_regs __iomem *)dev->base_addr;
526 unsigned long timeout = jiffies + HZ;
528 tc_writel(val, &tr->MD_Data);
529 tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f),
531 udelay(12); /* it takes 32 x 400ns at least */
532 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
533 if (time_after(jiffies, timeout))
540 static void tc_handle_link_change(struct net_device *dev)
542 struct tc35815_local *lp = netdev_priv(dev);
543 struct phy_device *phydev = lp->phy_dev;
545 int status_change = 0;
547 spin_lock_irqsave(&lp->lock, flags);
549 (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
550 struct tc35815_regs __iomem *tr =
551 (struct tc35815_regs __iomem *)dev->base_addr;
554 reg = tc_readl(&tr->MAC_Ctl);
556 tc_writel(reg, &tr->MAC_Ctl);
557 if (phydev->duplex == DUPLEX_FULL)
561 tc_writel(reg, &tr->MAC_Ctl);
563 tc_writel(reg, &tr->MAC_Ctl);
566 * TX4939 PCFG.SPEEDn bit will be changed on
567 * NETDEV_CHANGE event.
570 * WORKAROUND: enable LostCrS only if half duplex
572 * (TX4939 does not have EnLCarr)
574 if (phydev->duplex == DUPLEX_HALF &&
575 lp->chiptype != TC35815_TX4939)
576 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
579 lp->speed = phydev->speed;
580 lp->duplex = phydev->duplex;
584 if (phydev->link != lp->link) {
586 /* delayed promiscuous enabling */
587 if (dev->flags & IFF_PROMISC)
588 tc35815_set_multicast_list(dev);
593 lp->link = phydev->link;
597 spin_unlock_irqrestore(&lp->lock, flags);
599 if (status_change && netif_msg_link(lp)) {
600 phy_print_status(phydev);
601 pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n",
603 phy_read(phydev, MII_BMCR),
604 phy_read(phydev, MII_BMSR),
605 phy_read(phydev, MII_LPA));
609 static int tc_mii_probe(struct net_device *dev)
611 struct tc35815_local *lp = netdev_priv(dev);
612 struct phy_device *phydev = NULL;
616 /* find the first phy */
617 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
618 if (lp->mii_bus->phy_map[phy_addr]) {
620 printk(KERN_ERR "%s: multiple PHYs found\n",
624 phydev = lp->mii_bus->phy_map[phy_addr];
630 printk(KERN_ERR "%s: no PHY found\n", dev->name);
634 /* attach the mac to the phy */
635 phydev = phy_connect(dev, dev_name(&phydev->dev),
636 &tc_handle_link_change,
637 lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
638 if (IS_ERR(phydev)) {
639 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
640 return PTR_ERR(phydev);
642 printk(KERN_INFO "%s: attached PHY driver [%s] "
643 "(mii_bus:phy_addr=%s, id=%x)\n",
644 dev->name, phydev->drv->name, dev_name(&phydev->dev),
647 /* mask with MAC supported features */
648 phydev->supported &= PHY_BASIC_FEATURES;
650 if (options.speed == 10)
651 dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
652 else if (options.speed == 100)
653 dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
654 if (options.duplex == 1)
655 dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
656 else if (options.duplex == 2)
657 dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
658 phydev->supported &= ~dropmask;
659 phydev->advertising = phydev->supported;
664 lp->phy_dev = phydev;
669 static int tc_mii_init(struct net_device *dev)
671 struct tc35815_local *lp = netdev_priv(dev);
675 lp->mii_bus = mdiobus_alloc();
676 if (lp->mii_bus == NULL) {
681 lp->mii_bus->name = "tc35815_mii_bus";
682 lp->mii_bus->read = tc_mdio_read;
683 lp->mii_bus->write = tc_mdio_write;
684 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x",
685 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
686 lp->mii_bus->priv = dev;
687 lp->mii_bus->parent = &lp->pci_dev->dev;
688 lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
689 if (!lp->mii_bus->irq) {
691 goto err_out_free_mii_bus;
694 for (i = 0; i < PHY_MAX_ADDR; i++)
695 lp->mii_bus->irq[i] = PHY_POLL;
697 err = mdiobus_register(lp->mii_bus);
699 goto err_out_free_mdio_irq;
700 err = tc_mii_probe(dev);
702 goto err_out_unregister_bus;
705 err_out_unregister_bus:
706 mdiobus_unregister(lp->mii_bus);
707 err_out_free_mdio_irq:
708 kfree(lp->mii_bus->irq);
709 err_out_free_mii_bus:
710 mdiobus_free(lp->mii_bus);
715 #ifdef CONFIG_CPU_TX49XX
717 * Find a platform_device providing a MAC address. The platform code
718 * should provide a "tc35815-mac" device with a MAC address in its
721 static int tc35815_mac_match(struct device *dev, void *data)
723 struct platform_device *plat_dev = to_platform_device(dev);
724 struct pci_dev *pci_dev = data;
725 unsigned int id = pci_dev->irq;
726 return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
729 static int tc35815_read_plat_dev_addr(struct net_device *dev)
731 struct tc35815_local *lp = netdev_priv(dev);
732 struct device *pd = bus_find_device(&platform_bus_type, NULL,
733 lp->pci_dev, tc35815_mac_match);
735 if (pd->platform_data)
736 memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
738 return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
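/*
 * Illustrative board-code sketch (not part of this driver) of the contract
 * checked above: the platform code registers a device named "tc35815-mac"
 * whose id equals the controller's IRQ and whose platform_data points at the
 * six MAC address bytes.  TC35815_IRQ and tc35815_mac are hypothetical names:
 *
 *	static u8 tc35815_mac[ETH_ALEN];
 *	static struct platform_device tc35815_mac_dev = {
 *		.name			= "tc35815-mac",
 *		.id			= TC35815_IRQ,
 *		.dev.platform_data	= tc35815_mac,
 *	};
 *	...
 *	platform_device_register(&tc35815_mac_dev);
 */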
743 static int tc35815_read_plat_dev_addr(struct net_device *dev)
749 static int tc35815_init_dev_addr(struct net_device *dev)
751 struct tc35815_regs __iomem *tr =
752 (struct tc35815_regs __iomem *)dev->base_addr;
755 while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
757 for (i = 0; i < 6; i += 2) {
759 tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
760 while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
762 data = tc_readl(&tr->PROM_Data);
763 dev->dev_addr[i] = data & 0xff;
764 dev->dev_addr[i+1] = data >> 8;
766 if (!is_valid_ether_addr(dev->dev_addr))
767 return tc35815_read_plat_dev_addr(dev);
771 static const struct net_device_ops tc35815_netdev_ops = {
772 .ndo_open = tc35815_open,
773 .ndo_stop = tc35815_close,
774 .ndo_start_xmit = tc35815_send_packet,
775 .ndo_get_stats = tc35815_get_stats,
776 .ndo_set_rx_mode = tc35815_set_multicast_list,
777 .ndo_tx_timeout = tc35815_tx_timeout,
778 .ndo_do_ioctl = tc35815_ioctl,
779 .ndo_validate_addr = eth_validate_addr,
780 .ndo_change_mtu = eth_change_mtu,
781 .ndo_set_mac_address = eth_mac_addr,
782 #ifdef CONFIG_NET_POLL_CONTROLLER
783 .ndo_poll_controller = tc35815_poll_controller,
787 static int tc35815_init_one(struct pci_dev *pdev,
788 const struct pci_device_id *ent)
790 void __iomem *ioaddr = NULL;
791 struct net_device *dev;
792 struct tc35815_local *lp;
795 static int printed_version;
796 if (!printed_version++) {
798 dev_printk(KERN_DEBUG, &pdev->dev,
799 "speed:%d duplex:%d\n",
800 options.speed, options.duplex);
804 dev_warn(&pdev->dev, "no IRQ assigned.\n");
808 /* dev zeroed in alloc_etherdev */
809 dev = alloc_etherdev(sizeof(*lp));
813 SET_NETDEV_DEV(dev, &pdev->dev);
814 lp = netdev_priv(dev);
817 /* enable device (incl. PCI PM wakeup), and bus-mastering */
818 rc = pcim_enable_device(pdev);
821 rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME);
824 pci_set_master(pdev);
825 ioaddr = pcim_iomap_table(pdev)[1];
827 /* Initialize the device structure. */
828 dev->netdev_ops = &tc35815_netdev_ops;
829 dev->ethtool_ops = &tc35815_ethtool_ops;
830 dev->watchdog_timeo = TC35815_TX_TIMEOUT;
831 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
833 dev->irq = pdev->irq;
834 dev->base_addr = (unsigned long)ioaddr;
836 INIT_WORK(&lp->restart_work, tc35815_restart_work);
837 spin_lock_init(&lp->lock);
838 spin_lock_init(&lp->rx_lock);
840 lp->chiptype = ent->driver_data;
842 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
843 pci_set_drvdata(pdev, dev);
845 /* Soft reset the chip. */
846 tc35815_chip_reset(dev);
848 /* Retrieve the ethernet address. */
849 if (tc35815_init_dev_addr(dev)) {
850 dev_warn(&pdev->dev, "not valid ether addr\n");
851 eth_hw_addr_random(dev);
854 rc = register_netdev(dev);
858 printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
860 chip_info[ent->driver_data].name,
865 rc = tc_mii_init(dev);
867 goto err_out_unregister;
872 unregister_netdev(dev);
879 static void tc35815_remove_one(struct pci_dev *pdev)
881 struct net_device *dev = pci_get_drvdata(pdev);
882 struct tc35815_local *lp = netdev_priv(dev);
884 phy_disconnect(lp->phy_dev);
885 mdiobus_unregister(lp->mii_bus);
886 kfree(lp->mii_bus->irq);
887 mdiobus_free(lp->mii_bus);
888 unregister_netdev(dev);
893 tc35815_init_queues(struct net_device *dev)
895 struct tc35815_local *lp = netdev_priv(dev);
897 unsigned long fd_addr;
900 BUG_ON(sizeof(struct FDesc) +
901 sizeof(struct BDesc) * RX_BUF_NUM +
902 sizeof(struct FDesc) * RX_FD_NUM +
903 sizeof(struct TxFD) * TX_FD_NUM >
904 PAGE_SIZE * FD_PAGE_NUM);
906 lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
907 PAGE_SIZE * FD_PAGE_NUM,
911 for (i = 0; i < RX_BUF_NUM; i++) {
913 alloc_rxbuf_skb(dev, lp->pci_dev,
914 &lp->rx_skbs[i].skb_dma);
915 if (!lp->rx_skbs[i].skb) {
917 free_rxbuf_skb(lp->pci_dev,
919 lp->rx_skbs[i].skb_dma);
920 lp->rx_skbs[i].skb = NULL;
922 pci_free_consistent(lp->pci_dev,
923 PAGE_SIZE * FD_PAGE_NUM,
930 printk(KERN_DEBUG "%s: FD buf %p DataBuf",
931 dev->name, lp->fd_buf);
934 for (i = 0; i < FD_PAGE_NUM; i++)
935 clear_page((void *)((unsigned long)lp->fd_buf +
938 fd_addr = (unsigned long)lp->fd_buf;
940 /* Free Descriptors (for Receive) */
941 lp->rfd_base = (struct RxFD *)fd_addr;
942 fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
943 for (i = 0; i < RX_FD_NUM; i++)
944 lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
945 lp->rfd_cur = lp->rfd_base;
946 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
948 /* Transmit Descriptors */
949 lp->tfd_base = (struct TxFD *)fd_addr;
950 fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
951 for (i = 0; i < TX_FD_NUM; i++) {
952 lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
953 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
954 lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
956 lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
960 /* Buffer List (for Receive) */
961 lp->fbl_ptr = (struct FrFD *)fd_addr;
962 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
963 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
965 * move all allocated skbs to the head of the rx_skbs[] array.
966 * fbl_count might not be RX_BUF_NUM if alloc_rxbuf_skb() in
967 * tc35815_rx() had failed.
970 for (i = 0; i < RX_BUF_NUM; i++) {
971 if (lp->rx_skbs[i].skb) {
972 if (i != lp->fbl_count) {
973 lp->rx_skbs[lp->fbl_count].skb =
975 lp->rx_skbs[lp->fbl_count].skb_dma =
976 lp->rx_skbs[i].skb_dma;
981 for (i = 0; i < RX_BUF_NUM; i++) {
982 if (i >= lp->fbl_count) {
983 lp->fbl_ptr->bd[i].BuffData = 0;
984 lp->fbl_ptr->bd[i].BDCtl = 0;
987 lp->fbl_ptr->bd[i].BuffData =
988 cpu_to_le32(lp->rx_skbs[i].skb_dma);
989 /* BDID is index of FrFD.bd[] */
990 lp->fbl_ptr->bd[i].BDCtl =
991 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
995 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
996 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
1001 tc35815_clear_queues(struct net_device *dev)
1003 struct tc35815_local *lp = netdev_priv(dev);
1006 for (i = 0; i < TX_FD_NUM; i++) {
1007 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
1008 struct sk_buff *skb =
1009 fdsystem != 0xffffffff ?
1010 lp->tx_skbs[fdsystem].skb : NULL;
1012 if (lp->tx_skbs[i].skb != skb) {
1013 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
1017 BUG_ON(lp->tx_skbs[i].skb != skb);
1020 pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
1021 lp->tx_skbs[i].skb = NULL;
1022 lp->tx_skbs[i].skb_dma = 0;
1023 dev_kfree_skb_any(skb);
1025 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
1028 tc35815_init_queues(dev);
1032 tc35815_free_queues(struct net_device *dev)
1034 struct tc35815_local *lp = netdev_priv(dev);
1038 for (i = 0; i < TX_FD_NUM; i++) {
1039 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
1040 struct sk_buff *skb =
1041 fdsystem != 0xffffffff ?
1042 lp->tx_skbs[fdsystem].skb : NULL;
1044 if (lp->tx_skbs[i].skb != skb) {
1045 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
1049 BUG_ON(lp->tx_skbs[i].skb != skb);
1053 pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
1054 lp->tx_skbs[i].skb = NULL;
1055 lp->tx_skbs[i].skb_dma = 0;
1057 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
1061 lp->rfd_base = NULL;
1062 lp->rfd_limit = NULL;
1066 for (i = 0; i < RX_BUF_NUM; i++) {
1067 if (lp->rx_skbs[i].skb) {
1068 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
1069 lp->rx_skbs[i].skb_dma);
1070 lp->rx_skbs[i].skb = NULL;
1074 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
1075 lp->fd_buf, lp->fd_buf_dma);
1081 dump_txfd(struct TxFD *fd)
1083 printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
1084 le32_to_cpu(fd->fd.FDNext),
1085 le32_to_cpu(fd->fd.FDSystem),
1086 le32_to_cpu(fd->fd.FDStat),
1087 le32_to_cpu(fd->fd.FDCtl));
1089 printk(" %08x %08x",
1090 le32_to_cpu(fd->bd.BuffData),
1091 le32_to_cpu(fd->bd.BDCtl));
1096 dump_rxfd(struct RxFD *fd)
1098 int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
1101 printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
1102 le32_to_cpu(fd->fd.FDNext),
1103 le32_to_cpu(fd->fd.FDSystem),
1104 le32_to_cpu(fd->fd.FDStat),
1105 le32_to_cpu(fd->fd.FDCtl));
1106 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
1109 for (i = 0; i < bd_count; i++)
1110 printk(" %08x %08x",
1111 le32_to_cpu(fd->bd[i].BuffData),
1112 le32_to_cpu(fd->bd[i].BDCtl));
1119 dump_frfd(struct FrFD *fd)
1122 printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
1123 le32_to_cpu(fd->fd.FDNext),
1124 le32_to_cpu(fd->fd.FDSystem),
1125 le32_to_cpu(fd->fd.FDStat),
1126 le32_to_cpu(fd->fd.FDCtl));
1128 for (i = 0; i < RX_BUF_NUM; i++)
1129 printk(" %08x %08x",
1130 le32_to_cpu(fd->bd[i].BuffData),
1131 le32_to_cpu(fd->bd[i].BDCtl));
1136 panic_queues(struct net_device *dev)
1138 struct tc35815_local *lp = netdev_priv(dev);
1141 printk("TxFD base %p, start %u, end %u\n",
1142 lp->tfd_base, lp->tfd_start, lp->tfd_end);
1143 printk("RxFD base %p limit %p cur %p\n",
1144 lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
1145 printk("FrFD %p\n", lp->fbl_ptr);
1146 for (i = 0; i < TX_FD_NUM; i++)
1147 dump_txfd(&lp->tfd_base[i]);
1148 for (i = 0; i < RX_FD_NUM; i++) {
1149 int bd_count = dump_rxfd(&lp->rfd_base[i]);
1150 i += (bd_count + 1) / 2; /* skip BDs */
1152 dump_frfd(lp->fbl_ptr);
1153 panic("%s: Illegal queue state.", dev->name);
1157 static void print_eth(const u8 *add)
1159 printk(KERN_DEBUG "print_eth(%p)\n", add);
1160 printk(KERN_DEBUG " %pM => %pM : %02x%02x\n",
1161 add + 6, add, add[12], add[13]);
1164 static int tc35815_tx_full(struct net_device *dev)
1166 struct tc35815_local *lp = netdev_priv(dev);
1167 return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end;
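/*
 * Note: tfd_start is the next descriptor to fill and tfd_end the next one to
 * reclaim, so one slot is always kept unused; an empty ring (tfd_start ==
 * tfd_end) is thus distinguishable from a full one (tfd_start + 1 == tfd_end
 * modulo TX_FD_NUM).  With TX_FD_NUM == 128 at most 127 frames can be
 * outstanding.
 */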
1170 static void tc35815_restart(struct net_device *dev)
1172 struct tc35815_local *lp = netdev_priv(dev);
1176 ret = phy_init_hw(lp->phy_dev);
1178 printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
1181 spin_lock_bh(&lp->rx_lock);
1182 spin_lock_irq(&lp->lock);
1183 tc35815_chip_reset(dev);
1184 tc35815_clear_queues(dev);
1185 tc35815_chip_init(dev);
1186 /* Reconfigure CAM again since tc35815_chip_init() initializes it. */
1187 tc35815_set_multicast_list(dev);
1188 spin_unlock_irq(&lp->lock);
1189 spin_unlock_bh(&lp->rx_lock);
1191 netif_wake_queue(dev);
1194 static void tc35815_restart_work(struct work_struct *work)
1196 struct tc35815_local *lp =
1197 container_of(work, struct tc35815_local, restart_work);
1198 struct net_device *dev = lp->dev;
1200 tc35815_restart(dev);
1203 static void tc35815_schedule_restart(struct net_device *dev)
1205 struct tc35815_local *lp = netdev_priv(dev);
1206 struct tc35815_regs __iomem *tr =
1207 (struct tc35815_regs __iomem *)dev->base_addr;
1208 unsigned long flags;
1210 /* disable interrupts */
1211 spin_lock_irqsave(&lp->lock, flags);
1212 tc_writel(0, &tr->Int_En);
1213 tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
1214 schedule_work(&lp->restart_work);
1215 spin_unlock_irqrestore(&lp->lock, flags);
1218 static void tc35815_tx_timeout(struct net_device *dev)
1220 struct tc35815_regs __iomem *tr =
1221 (struct tc35815_regs __iomem *)dev->base_addr;
1223 printk(KERN_WARNING "%s: transmit timed out, status %#x\n",
1224 dev->name, tc_readl(&tr->Tx_Stat));
1226 /* Try to restart the adaptor. */
1227 tc35815_schedule_restart(dev);
1228 dev->stats.tx_errors++;
1232 * Open/initialize the controller. This is called (in the current kernel)
1233 * sometime after booting when the 'ifconfig' program is run.
1235 * This routine should set everything up anew at each open, even
1236 * registers that "should" only need to be set once at boot, so that
1237 * there is a non-reboot way to recover if something goes wrong.
1240 tc35815_open(struct net_device *dev)
1242 struct tc35815_local *lp = netdev_priv(dev);
1245 * This is used if the interrupt line can be turned off (shared).
1246 * See 3c503.c for an example of selecting the IRQ at config-time.
1248 if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED,
1252 tc35815_chip_reset(dev);
1254 if (tc35815_init_queues(dev) != 0) {
1255 free_irq(dev->irq, dev);
1259 napi_enable(&lp->napi);
1261 /* Reset the hardware here. Don't forget to set the station address. */
1262 spin_lock_irq(&lp->lock);
1263 tc35815_chip_init(dev);
1264 spin_unlock_irq(&lp->lock);
1266 netif_carrier_off(dev);
1267 /* schedule a link state check */
1268 phy_start(lp->phy_dev);
1270 /* We are now ready to accept transmit requests from
1271 * the queueing layer of the networking stack.
1273 netif_start_queue(dev);
1278 /* This will only be invoked if your driver is _not_ in XOFF state.
1279 * What this means is that you need not check it, and that this
1280 * invariant will hold if you make sure that the netif_*_queue()
1281 * calls are done at the proper times.
1284 tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1286 struct tc35815_local *lp = netdev_priv(dev);
1288 unsigned long flags;
1290 /* If some error occurs while trying to transmit this
1291 * packet, you should return NETDEV_TX_BUSY from this function.
1292 * In such a case you _may not_ do anything to the
1293 * SKB, it is still owned by the network queueing
1294 * layer when an error is returned. This means you
1295 * may not modify any SKB fields, you may not free
1299 /* This is the most common case for modern hardware.
1300 * The spinlock protects this code from the TX complete
1301 * hardware interrupt handler. Queue flow control is
1302 * thus managed under this lock as well.
1304 spin_lock_irqsave(&lp->lock, flags);
1306 /* failsafe... (handle txdone now if half of FDs are used) */
1307 if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
1309 tc35815_txdone(dev);
1311 if (netif_msg_pktdata(lp))
1312 print_eth(skb->data);
1314 if (lp->tx_skbs[lp->tfd_start].skb) {
1315 printk("%s: tx_skbs conflict.\n", dev->name);
1319 BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
1321 lp->tx_skbs[lp->tfd_start].skb = skb;
1322 lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1325 txfd = &lp->tfd_base[lp->tfd_start];
1326 txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
1327 txfd->bd.BDCtl = cpu_to_le32(skb->len);
1328 txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
1329 txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));
1331 if (lp->tfd_start == lp->tfd_end) {
1332 struct tc35815_regs __iomem *tr =
1333 (struct tc35815_regs __iomem *)dev->base_addr;
1334 /* Start DMA Transmitter. */
1335 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
1336 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
1337 if (netif_msg_tx_queued(lp)) {
1338 printk("%s: starting TxFD.\n", dev->name);
1341 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
1343 txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
1344 if (netif_msg_tx_queued(lp)) {
1345 printk("%s: queueing TxFD.\n", dev->name);
1349 lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
1351 /* If we just used up the very last entry in the
1352 * TX ring on this device, tell the queueing
1353 * layer to send no more.
1355 if (tc35815_tx_full(dev)) {
1356 if (netif_msg_tx_queued(lp))
1357 printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
1358 netif_stop_queue(dev);
1361 /* When the TX completion hw interrupt arrives, this
1362 * is when the transmit statistics are updated.
1365 spin_unlock_irqrestore(&lp->lock, flags);
1366 return NETDEV_TX_OK;
1369 #define FATAL_ERROR_INT \
1370 (Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
1371 static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1374 printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
1376 if (status & Int_IntPCI)
1378 if (status & Int_DmParErr)
1379 printk(" DmParErr");
1380 if (status & Int_IntNRAbt)
1381 printk(" IntNRAbt");
1384 panic("%s: Too many fatal errors.", dev->name);
1385 printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
1386 /* Try to restart the adaptor. */
1387 tc35815_schedule_restart(dev);
1390 static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1392 struct tc35815_local *lp = netdev_priv(dev);
1395 /* Fatal errors... */
1396 if (status & FATAL_ERROR_INT) {
1397 tc35815_fatal_error_interrupt(dev, status);
1400 /* recoverable errors */
1401 if (status & Int_IntFDAEx) {
1402 if (netif_msg_rx_err(lp))
1404 "Free Descriptor Area Exhausted (%#x).\n",
1406 dev->stats.rx_dropped++;
1409 if (status & Int_IntBLEx) {
1410 if (netif_msg_rx_err(lp))
1412 "Buffer List Exhausted (%#x).\n",
1414 dev->stats.rx_dropped++;
1417 if (status & Int_IntExBD) {
1418 if (netif_msg_rx_err(lp))
1420 "Excessive Buffer Descriptors (%#x).\n",
1422 dev->stats.rx_length_errors++;
1426 /* normal notification */
1427 if (status & Int_IntMacRx) {
1428 /* Got a packet(s). */
1429 ret = tc35815_rx(dev, limit);
1430 lp->lstats.rx_ints++;
1432 if (status & Int_IntMacTx) {
1433 /* Transmit complete. */
1434 lp->lstats.tx_ints++;
1435 spin_lock_irq(&lp->lock);
1436 tc35815_txdone(dev);
1437 spin_unlock_irq(&lp->lock);
1445 * The typical workload of the driver:
1446 * Handle the network interface interrupts.
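 *
 * The handler masks further interrupts with DMA_IntMask and schedules NAPI;
 * tc35815_poll() then acks Int_Src and clears DMA_IntMask again only once it
 * has consumed less than its budget, which re-arms the interrupt.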
1448 static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1450 struct net_device *dev = dev_id;
1451 struct tc35815_local *lp = netdev_priv(dev);
1452 struct tc35815_regs __iomem *tr =
1453 (struct tc35815_regs __iomem *)dev->base_addr;
1454 u32 dmactl = tc_readl(&tr->DMA_Ctl);
1456 if (!(dmactl & DMA_IntMask)) {
1457 /* disable interrupts */
1458 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
1459 if (napi_schedule_prep(&lp->napi))
1460 __napi_schedule(&lp->napi);
1462 printk(KERN_ERR "%s: interrupt taken in poll\n",
1466 (void)tc_readl(&tr->Int_Src); /* flush */
1472 #ifdef CONFIG_NET_POLL_CONTROLLER
1473 static void tc35815_poll_controller(struct net_device *dev)
1475 disable_irq(dev->irq);
1476 tc35815_interrupt(dev->irq, dev);
1477 enable_irq(dev->irq);
1481 /* We have a good packet(s), get it/them out of the buffers. */
1483 tc35815_rx(struct net_device *dev, int limit)
1485 struct tc35815_local *lp = netdev_priv(dev);
1490 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
1491 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
1492 int pkt_len = fdctl & FD_FDLength_MASK;
1493 int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
1495 struct RxFD *next_rfd;
1497 #if (RX_CTL_CMD & Rx_StripCRC) == 0
1498 pkt_len -= ETH_FCS_LEN;
1501 if (netif_msg_rx_status(lp))
1502 dump_rxfd(lp->rfd_cur);
1503 if (status & Rx_Good) {
1504 struct sk_buff *skb;
1505 unsigned char *data;
1510 BUG_ON(bd_count > 1);
1511 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
1512 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
1514 if (cur_bd >= RX_BUF_NUM) {
1515 printk("%s: invalid BDID.\n", dev->name);
1518 BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
1519 (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
1520 if (!lp->rx_skbs[cur_bd].skb) {
1521 printk("%s: NULL skb.\n", dev->name);
1525 BUG_ON(cur_bd >= RX_BUF_NUM);
1527 skb = lp->rx_skbs[cur_bd].skb;
1528 prefetch(skb->data);
1529 lp->rx_skbs[cur_bd].skb = NULL;
1530 pci_unmap_single(lp->pci_dev,
1531 lp->rx_skbs[cur_bd].skb_dma,
1532 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1533 if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
1534 memmove(skb->data, skb->data - NET_IP_ALIGN,
1536 data = skb_put(skb, pkt_len);
1537 if (netif_msg_pktdata(lp))
1539 skb->protocol = eth_type_trans(skb, dev);
1540 netif_receive_skb(skb);
1542 dev->stats.rx_packets++;
1543 dev->stats.rx_bytes += pkt_len;
1545 dev->stats.rx_errors++;
1546 if (netif_msg_rx_err(lp))
1547 dev_info(&dev->dev, "Rx error (status %x)\n",
1548 status & Rx_Stat_Mask);
1549 /* WORKAROUND: LongErr and CRCErr mean Overflow. */
1550 if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
1551 status &= ~(Rx_LongErr|Rx_CRCErr);
1554 if (status & Rx_LongErr)
1555 dev->stats.rx_length_errors++;
1556 if (status & Rx_Over)
1557 dev->stats.rx_fifo_errors++;
1558 if (status & Rx_CRCErr)
1559 dev->stats.rx_crc_errors++;
1560 if (status & Rx_Align)
1561 dev->stats.rx_frame_errors++;
1565 /* put Free Buffer back to controller */
1566 int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
1568 (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
1570 if (id >= RX_BUF_NUM) {
1571 printk("%s: invalid BDID.\n", dev->name);
1575 BUG_ON(id >= RX_BUF_NUM);
1577 /* free old buffers */
1579 while (lp->fbl_count < RX_BUF_NUM)
1581 unsigned char curid =
1582 (id + 1 + lp->fbl_count) % RX_BUF_NUM;
1583 struct BDesc *bd = &lp->fbl_ptr->bd[curid];
1585 bdctl = le32_to_cpu(bd->BDCtl);
1586 if (bdctl & BD_CownsBD) {
1587 printk("%s: Freeing invalid BD.\n",
1592 /* pass BD to controller */
1593 if (!lp->rx_skbs[curid].skb) {
1594 lp->rx_skbs[curid].skb =
1595 alloc_rxbuf_skb(dev,
1597 &lp->rx_skbs[curid].skb_dma);
1598 if (!lp->rx_skbs[curid].skb)
1599 break; /* try on next reception */
1600 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
1602 /* Note: BDLength was modified by chip. */
1603 bd->BDCtl = cpu_to_le32(BD_CownsBD |
1604 (curid << BD_RxBDID_SHIFT) |
1610 /* put RxFD back to controller */
1612 next_rfd = fd_bus_to_virt(lp,
1613 le32_to_cpu(lp->rfd_cur->fd.FDNext));
1614 if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
1615 printk("%s: RxFD FDNext invalid.\n", dev->name);
1619 for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
1620 /* pass FD to controller */
1622 lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);
1624 lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
1626 lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
1629 if (lp->rfd_cur > lp->rfd_limit)
1630 lp->rfd_cur = lp->rfd_base;
1632 if (lp->rfd_cur != next_rfd)
1633 printk("rfd_cur = %p, next_rfd %p\n",
1634 lp->rfd_cur, next_rfd);
1641 static int tc35815_poll(struct napi_struct *napi, int budget)
1643 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
1644 struct net_device *dev = lp->dev;
1645 struct tc35815_regs __iomem *tr =
1646 (struct tc35815_regs __iomem *)dev->base_addr;
1647 int received = 0, handled;
1653 spin_lock(&lp->rx_lock);
1654 status = tc_readl(&tr->Int_Src);
1656 /* BLEx, FDAEx will be cleared later */
1657 tc_writel(status & ~(Int_BLEx | Int_FDAEx),
1658 &tr->Int_Src); /* write to clear */
1660 handled = tc35815_do_interrupt(dev, status, budget - received);
1661 if (status & (Int_BLEx | Int_FDAEx))
1662 tc_writel(status & (Int_BLEx | Int_FDAEx),
1665 received += handled;
1666 if (received >= budget)
1669 status = tc_readl(&tr->Int_Src);
1671 spin_unlock(&lp->rx_lock);
1673 if (received < budget) {
1674 napi_complete(napi);
1675 /* enable interrupts */
1676 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
1681 #define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1684 tc35815_check_tx_stat(struct net_device *dev, int status)
1686 struct tc35815_local *lp = netdev_priv(dev);
1687 const char *msg = NULL;
1689 /* count collisions */
1690 if (status & Tx_ExColl)
1691 dev->stats.collisions += 16;
1692 if (status & Tx_TxColl_MASK)
1693 dev->stats.collisions += status & Tx_TxColl_MASK;
1695 /* TX4939 does not have NCarr */
1696 if (lp->chiptype == TC35815_TX4939)
1697 status &= ~Tx_NCarr;
1698 /* WORKAROUND: ignore LostCrS in full duplex operation */
1699 if (!lp->link || lp->duplex == DUPLEX_FULL)
1700 status &= ~Tx_NCarr;
1702 if (!(status & TX_STA_ERR)) {
1704 dev->stats.tx_packets++;
1708 dev->stats.tx_errors++;
1709 if (status & Tx_ExColl) {
1710 dev->stats.tx_aborted_errors++;
1711 msg = "Excessive Collision.";
1713 if (status & Tx_Under) {
1714 dev->stats.tx_fifo_errors++;
1715 msg = "Tx FIFO Underrun.";
1716 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
1717 lp->lstats.tx_underrun++;
1718 if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) {
1719 struct tc35815_regs __iomem *tr =
1720 (struct tc35815_regs __iomem *)dev->base_addr;
1721 tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh);
1722 msg = "Tx FIFO Underrun.Change Tx threshold to max.";
1726 if (status & Tx_Defer) {
1727 dev->stats.tx_fifo_errors++;
1728 msg = "Excessive Deferral.";
1730 if (status & Tx_NCarr) {
1731 dev->stats.tx_carrier_errors++;
1732 msg = "Lost Carrier Sense.";
1734 if (status & Tx_LateColl) {
1735 dev->stats.tx_aborted_errors++;
1736 msg = "Late Collision.";
1738 if (status & Tx_TxPar) {
1739 dev->stats.tx_fifo_errors++;
1740 msg = "Transmit Parity Error.";
1742 if (status & Tx_SQErr) {
1743 dev->stats.tx_heartbeat_errors++;
1744 msg = "Signal Quality Error.";
1746 if (msg && netif_msg_tx_err(lp))
1747 printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status);
1750 /* This handles TX complete events posted by the device
1754 tc35815_txdone(struct net_device *dev)
1756 struct tc35815_local *lp = netdev_priv(dev);
1760 txfd = &lp->tfd_base[lp->tfd_end];
1761 while (lp->tfd_start != lp->tfd_end &&
1762 !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) {
1763 int status = le32_to_cpu(txfd->fd.FDStat);
1764 struct sk_buff *skb;
1765 unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext);
1766 u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem);
1768 if (netif_msg_tx_done(lp)) {
1769 printk("%s: complete TxFD.\n", dev->name);
1772 tc35815_check_tx_stat(dev, status);
1774 skb = fdsystem != 0xffffffff ?
1775 lp->tx_skbs[fdsystem].skb : NULL;
1777 if (lp->tx_skbs[lp->tfd_end].skb != skb) {
1778 printk("%s: tx_skbs mismatch.\n", dev->name);
1782 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
1785 dev->stats.tx_bytes += skb->len;
1786 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
1787 lp->tx_skbs[lp->tfd_end].skb = NULL;
1788 lp->tx_skbs[lp->tfd_end].skb_dma = 0;
1789 dev_kfree_skb_any(skb);
1791 txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
1793 lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
1794 txfd = &lp->tfd_base[lp->tfd_end];
1796 if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
1797 printk("%s: TxFD FDNext invalid.\n", dev->name);
1801 if (fdnext & FD_Next_EOL) {
1802 /* DMA Transmitter has stopped... */
1803 if (lp->tfd_end != lp->tfd_start) {
1804 struct tc35815_regs __iomem *tr =
1805 (struct tc35815_regs __iomem *)dev->base_addr;
1806 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
1807 struct TxFD *txhead = &lp->tfd_base[head];
1808 int qlen = (lp->tfd_start + TX_FD_NUM
1809 - lp->tfd_end) % TX_FD_NUM;
1812 if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
1813 printk("%s: TxFD FDCtl invalid.\n", dev->name);
1817 /* log max queue length */
1818 if (lp->lstats.max_tx_qlen < qlen)
1819 lp->lstats.max_tx_qlen = qlen;
1822 /* start DMA Transmitter again */
1823 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
1824 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
1825 if (netif_msg_tx_queued(lp)) {
1826 printk("%s: start TxFD on queue.\n",
1830 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
1836 /* If we had stopped the queue due to a "tx full"
1837 * condition, and space has now been made available,
1838 * wake up the queue.
1840 if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
1841 netif_wake_queue(dev);
1844 /* The inverse routine to tc35815_open(). */
1846 tc35815_close(struct net_device *dev)
1848 struct tc35815_local *lp = netdev_priv(dev);
1850 netif_stop_queue(dev);
1851 napi_disable(&lp->napi);
1853 phy_stop(lp->phy_dev);
1854 cancel_work_sync(&lp->restart_work);
1856 /* Flush the Tx and disable Rx here. */
1857 tc35815_chip_reset(dev);
1858 free_irq(dev->irq, dev);
1860 tc35815_free_queues(dev);
1867 * Get the current statistics.
1868 * This may be called with the card open or closed.
1870 static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
1872 struct tc35815_regs __iomem *tr =
1873 (struct tc35815_regs __iomem *)dev->base_addr;
1874 if (netif_running(dev))
1875 /* Update the statistics from the device registers. */
1876 dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt);
1881 static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
1883 struct tc35815_local *lp = netdev_priv(dev);
1884 struct tc35815_regs __iomem *tr =
1885 (struct tc35815_regs __iomem *)dev->base_addr;
1886 int cam_index = index * 6;
1890 saved_addr = tc_readl(&tr->CAM_Adr);
1892 if (netif_msg_hw(lp))
1893 printk(KERN_DEBUG "%s: CAM %d: %pM\n",
1894 dev->name, index, addr);
1896 /* read modify write */
1897 tc_writel(cam_index - 2, &tr->CAM_Adr);
1898 cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000;
1899 cam_data |= addr[0] << 8 | addr[1];
1900 tc_writel(cam_data, &tr->CAM_Data);
1901 /* write whole word */
1902 tc_writel(cam_index + 2, &tr->CAM_Adr);
1903 cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
1904 tc_writel(cam_data, &tr->CAM_Data);
1906 /* write whole word */
1907 tc_writel(cam_index, &tr->CAM_Adr);
1908 cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
1909 tc_writel(cam_data, &tr->CAM_Data);
1910 /* read modify write */
1911 tc_writel(cam_index + 4, &tr->CAM_Adr);
1912 cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff;
1913 cam_data |= addr[4] << 24 | (addr[5] << 16);
1914 tc_writel(cam_data, &tr->CAM_Data);
1917 tc_writel(saved_addr, &tr->CAM_Adr);
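/*
 * CAM addressing note: entries are six bytes long, so they are not aligned to
 * the 32-bit CAM_Data words (entry n starts at byte offset n * 6).  Words
 * wholly owned by one entry are written directly, while words shared with a
 * neighbouring entry are updated read-modify-write as above.  For example,
 * entry 1 (bytes 6..11) shares only the word at offset 4 with entry 0, so its
 * first two bytes are merged into that word and bytes 8..11 are written as
 * one full word at offset 8.
 */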
1922 * Set or clear the multicast filter for this adaptor.
1923 * num_addrs == -1 Promiscuous mode, receive all packets
1924 * num_addrs == 0 Normal mode, clear multicast list
1925 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1926 * and do best-effort filtering.
1929 tc35815_set_multicast_list(struct net_device *dev)
1931 struct tc35815_regs __iomem *tr =
1932 (struct tc35815_regs __iomem *)dev->base_addr;
1934 if (dev->flags & IFF_PROMISC) {
1935 /* With some (all?) 100M half-duplex hubs, the controller will hang
1936 * if promiscuous mode is enabled before link-up... */
1937 struct tc35815_local *lp = netdev_priv(dev);
1941 /* Enable promiscuous mode */
1942 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
1943 } else if ((dev->flags & IFF_ALLMULTI) ||
1944 netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
1945 /* CAM 0, 1, 20 are reserved. */
1946 /* Disable promiscuous mode, use normal mode. */
1947 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
1948 } else if (!netdev_mc_empty(dev)) {
1949 struct netdev_hw_addr *ha;
1951 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
1953 tc_writel(0, &tr->CAM_Ctl);
1954 /* Walk the address list, and load the filter */
1956 netdev_for_each_mc_addr(ha, dev) {
1957 /* entry 0,1 is reserved. */
1958 tc35815_set_cam_entry(dev, i + 2, ha->addr);
1959 ena_bits |= CAM_Ena_Bit(i + 2);
1962 tc_writel(ena_bits, &tr->CAM_Ena);
1963 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
1965 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
1966 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
1970 static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1972 struct tc35815_local *lp = netdev_priv(dev);
1974 strlcpy(info->driver, MODNAME, sizeof(info->driver));
1975 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1976 strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
1979 static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1981 struct tc35815_local *lp = netdev_priv(dev);
1985 return phy_ethtool_gset(lp->phy_dev, cmd);
1988 static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1990 struct tc35815_local *lp = netdev_priv(dev);
1994 return phy_ethtool_sset(lp->phy_dev, cmd);
1997 static u32 tc35815_get_msglevel(struct net_device *dev)
1999 struct tc35815_local *lp = netdev_priv(dev);
2000 return lp->msg_enable;
2003 static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
2005 struct tc35815_local *lp = netdev_priv(dev);
2006 lp->msg_enable = datum;
2009 static int tc35815_get_sset_count(struct net_device *dev, int sset)
2011 struct tc35815_local *lp = netdev_priv(dev);
2015 return sizeof(lp->lstats) / sizeof(int);
2021 static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
2023 struct tc35815_local *lp = netdev_priv(dev);
2024 data[0] = lp->lstats.max_tx_qlen;
2025 data[1] = lp->lstats.tx_ints;
2026 data[2] = lp->lstats.rx_ints;
2027 data[3] = lp->lstats.tx_underrun;
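/*
 * Note: the ordering above must match ethtool_stats_keys[] below;
 * tc35815_get_sset_count() derives the same count from
 * sizeof(lp->lstats) / sizeof(int), assuming lstats consists solely of these
 * int counters.
 */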
2031 const char str[ETH_GSTRING_LEN];
2032 } ethtool_stats_keys[] = {
2039 static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2041 memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
2044 static const struct ethtool_ops tc35815_ethtool_ops = {
2045 .get_drvinfo = tc35815_get_drvinfo,
2046 .get_settings = tc35815_get_settings,
2047 .set_settings = tc35815_set_settings,
2048 .get_link = ethtool_op_get_link,
2049 .get_msglevel = tc35815_get_msglevel,
2050 .set_msglevel = tc35815_set_msglevel,
2051 .get_strings = tc35815_get_strings,
2052 .get_sset_count = tc35815_get_sset_count,
2053 .get_ethtool_stats = tc35815_get_ethtool_stats,
2056 static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2058 struct tc35815_local *lp = netdev_priv(dev);
2060 if (!netif_running(dev))
2064 return phy_mii_ioctl(lp->phy_dev, rq, cmd);
2067 static void tc35815_chip_reset(struct net_device *dev)
2069 struct tc35815_regs __iomem *tr =
2070 (struct tc35815_regs __iomem *)dev->base_addr;
2072 /* reset the controller */
2073 tc_writel(MAC_Reset, &tr->MAC_Ctl);
2074 udelay(4); /* 3200ns */
2076 while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) {
2078 printk(KERN_ERR "%s: MAC reset failed.\n", dev->name);
2083 tc_writel(0, &tr->MAC_Ctl);
2085 /* initialize registers to default value */
2086 tc_writel(0, &tr->DMA_Ctl);
2087 tc_writel(0, &tr->TxThrsh);
2088 tc_writel(0, &tr->TxPollCtr);
2089 tc_writel(0, &tr->RxFragSize);
2090 tc_writel(0, &tr->Int_En);
2091 tc_writel(0, &tr->FDA_Bas);
2092 tc_writel(0, &tr->FDA_Lim);
2093 tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */
2094 tc_writel(0, &tr->CAM_Ctl);
2095 tc_writel(0, &tr->Tx_Ctl);
2096 tc_writel(0, &tr->Rx_Ctl);
2097 tc_writel(0, &tr->CAM_Ena);
2098 (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */
2100 /* initialize internal SRAM */
2101 tc_writel(DMA_TestMode, &tr->DMA_Ctl);
2102 for (i = 0; i < 0x1000; i += 4) {
2103 tc_writel(i, &tr->CAM_Adr);
2104 tc_writel(0, &tr->CAM_Data);
2106 tc_writel(0, &tr->DMA_Ctl);
2109 static void tc35815_chip_init(struct net_device *dev)
2111 struct tc35815_local *lp = netdev_priv(dev);
2112 struct tc35815_regs __iomem *tr =
2113 (struct tc35815_regs __iomem *)dev->base_addr;
2114 unsigned long txctl = TX_CTL_CMD;
2116 /* load station address to CAM */
2117 tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);
2119 /* Enable CAM (broadcast and unicast) */
2120 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
2121 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2123 /* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */
2124 if (HAVE_DMA_RXALIGN(lp))
2125 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
2127 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
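/*
 * Rx alignment note: with DMA_RxAlign_2 the chip starts each received frame
 * two bytes into the buffer, so the IP header lands on the 4-byte boundary
 * reserved by skb_reserve(skb, 2) in alloc_rxbuf_skb().  The TC35815CF lacks
 * RxAlign (see HAVE_DMA_RXALIGN()), so tc35815_rx() instead memmove()s the
 * frame up by NET_IP_ALIGN after DMA completes.
 */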
2128 tc_writel(0, &tr->TxPollCtr); /* Batch mode */
2129 tc_writel(TX_THRESHOLD, &tr->TxThrsh);
2130 tc_writel(INT_EN_CMD, &tr->Int_En);
2133 tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
2134 tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
2137 * Activation method:
2138 * First, enable the MAC Transmitter and the DMA Receive circuits.
2139 * Then enable the DMA Transmitter and the MAC Receive circuits.
2141 tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */
2142 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */
2144 /* start MAC transmitter */
2145 /* TX4939 does not have EnLCarr */
2146 if (lp->chiptype == TC35815_TX4939)
2147 txctl &= ~Tx_EnLCarr;
2148 /* WORKAROUND: ignore LostCrS in full duplex operation */
2149 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
2150 txctl &= ~Tx_EnLCarr;
2151 tc_writel(txctl, &tr->Tx_Ctl);
2155 static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
2157 struct net_device *dev = pci_get_drvdata(pdev);
2158 struct tc35815_local *lp = netdev_priv(dev);
2159 unsigned long flags;
2161 pci_save_state(pdev);
2162 if (!netif_running(dev))
2164 netif_device_detach(dev);
2166 phy_stop(lp->phy_dev);
2167 spin_lock_irqsave(&lp->lock, flags);
2168 tc35815_chip_reset(dev);
2169 spin_unlock_irqrestore(&lp->lock, flags);
2170 pci_set_power_state(pdev, PCI_D3hot);
2174 static int tc35815_resume(struct pci_dev *pdev)
2176 struct net_device *dev = pci_get_drvdata(pdev);
2177 struct tc35815_local *lp = netdev_priv(dev);
2179 pci_restore_state(pdev);
2180 if (!netif_running(dev))
2182 pci_set_power_state(pdev, PCI_D0);
2183 tc35815_restart(dev);
2184 netif_carrier_off(dev);
2186 phy_start(lp->phy_dev);
2187 netif_device_attach(dev);
2190 #endif /* CONFIG_PM */
2192 static struct pci_driver tc35815_pci_driver = {
2194 .id_table = tc35815_pci_tbl,
2195 .probe = tc35815_init_one,
2196 .remove = tc35815_remove_one,
2198 .suspend = tc35815_suspend,
2199 .resume = tc35815_resume,
2203 module_param_named(speed, options.speed, int, 0);
2204 MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
2205 module_param_named(duplex, options.duplex, int, 0);
2206 MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
2208 module_pci_driver(tc35815_pci_driver);
2209 MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
2210 MODULE_LICENSE("GPL");