1 // SPDX-License-Identifier: GPL-1.0+
2 /* 82596.c: A generic 82596 ethernet driver for linux. */
5 Written 1994 by Mark Evans.
6 This driver is for the Apricot 82596 bus-master interface
8 Modularised 12/94 Mark Evans
11 Modified to support the 82596 ethernet chips on 680x0 VME boards.
12 by Richard Hirst <richard@sleepie.demon.co.uk>
15 980825: Changed to receive directly into sk_buffs which are
16 allocated at open() time. Eliminates copy on incoming frames
17 (small ones are still copied). Shared data now held in a
18 non-cached page, so we can run on 68060 in copyback mode.
21 * look at deferring rx frames rather than discarding (as per tulip)
22 * handle tx ring full as per tulip
23 * performance test to tune rx_copybreak
25 Most of my modifications relate to the braindead big-endian
26 implementation by Intel. When the i596 is operating in
27 'big-endian' mode, it thinks a 32-bit value of 0x12345678
28 should be stored as 0x56781234. This is a real pain when
29 you have linked lists which are shared by the 680x0 and the i82596.
33 Written 1993 by Donald Becker.
34 Copyright 1993 United States Government as represented by the Director,
35 National Security Agency.
37 The author may be reached as becker@scyld.com, or C/O
38 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
42 #include <linux/module.h>
43 #include <linux/kernel.h>
44 #include <linux/string.h>
45 #include <linux/errno.h>
46 #include <linux/ioport.h>
47 #include <linux/interrupt.h>
48 #include <linux/delay.h>
49 #include <linux/netdevice.h>
50 #include <linux/etherdevice.h>
51 #include <linux/skbuff.h>
52 #include <linux/init.h>
53 #include <linux/bitops.h>
54 #include <linux/gfp.h>
55 #include <linux/pgtable.h>
59 #include <asm/cacheflush.h>
61 static char version[] __initdata =
62 "82596.c $Revision: 1.5 $\n";
64 #define DRV_NAME "82596"
69 #define DEB_INIT 0x0001
70 #define DEB_PROBE 0x0002
71 #define DEB_SERIOUS 0x0004
72 #define DEB_ERRORS 0x0008
73 #define DEB_MULTI 0x0010
74 #define DEB_TDR 0x0020
75 #define DEB_OPEN 0x0040
76 #define DEB_RESET 0x0080
77 #define DEB_ADDCMD 0x0100
78 #define DEB_STATUS 0x0200
79 #define DEB_STARTTX 0x0400
80 #define DEB_RXADDR 0x0800
81 #define DEB_TXADDR 0x1000
82 #define DEB_RXFRAME 0x2000
83 #define DEB_INTS 0x4000
84 #define DEB_STRUCT 0x8000
85 #define DEB_ANY 0xffff
88 #define DEB(x,y) if (i596_debug & (x)) y
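/*
 * The second argument is an arbitrary statement executed only when the
 * corresponding bit is set in i596_debug, for example (taken from
 * init_i596_mem() below):
 *
 *	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
 *
 * Note that the macro expands to a bare if without braces, so wrap a call
 * in braces if it ever becomes the body of an outer if/else.
 */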
91 #if IS_ENABLED(CONFIG_MVME16x_NET)
92 #define ENABLE_MVME16x_NET
94 #if IS_ENABLED(CONFIG_BVME6000_NET)
95 #define ENABLE_BVME6000_NET
98 #ifdef ENABLE_MVME16x_NET
99 #include <asm/mvme16xhw.h>
101 #ifdef ENABLE_BVME6000_NET
102 #include <asm/bvme6000hw.h>
106 * Define various macros for Channel Attention, word swapping etc., dependent
107 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
111 #define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
112 #define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
113 #define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
114 #define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
115 #define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
116 #define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
117 #define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
118 #define ISCP_BUSY 0x00010000
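/*
 * Illustration: virt_to_bus() gives the CPU's view of a shared structure;
 * the WSWAP macros exchange the 16-bit halves so the value lands in memory
 * the way the word-swapped i82596 expects it (0x12345678 becomes
 * 0x56781234). init_i596_mem() below uses them like this:
 *
 *	lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
 *	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
 */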
120 #error 82596.c: unknown architecture
124 * These were the Intel versions, left here for reference. There
125 * are currently no x86 users of this legacy i82596 chip.
128 #define WSWAPrfd(x) ((struct i596_rfd *)((long)x))
129 #define WSWAPrbd(x) ((struct i596_rbd *)((long)x))
130 #define WSWAPiscp(x) ((struct i596_iscp *)((long)x))
131 #define WSWAPscb(x) ((struct i596_scb *)((long)x))
132 #define WSWAPcmd(x) ((struct i596_cmd *)((long)x))
133 #define WSWAPtbd(x) ((struct i596_tbd *)((long)x))
134 #define WSWAPchar(x) ((char *)((long)x))
135 #define ISCP_BUSY 0x0001
139 * The MPU_PORT command allows direct access to the 82596. With PORT access
140 * the following commands are available (p5-18). The 32-bit port command
141 * must be word-swapped with the most significant word written first.
142 * This only applies to VME boards.
144 #define PORT_RESET 0x00 /* reset 82596 */
145 #define PORT_SELFTEST 0x01 /* selftest */
146 #define PORT_ALTSCP 0x02 /* alternate SCB address */
147 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
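/*
 * A PORT command is issued by OR'ing one of the codes above into the bus
 * address of its argument and writing the result via MPU_PORT(), which
 * handles the per-board word ordering; for example, init_i596_mem() hands
 * the alternate SCP address to the chip with
 *
 *	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
 */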
149 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
151 MODULE_AUTHOR("Richard Hirst");
152 MODULE_DESCRIPTION("i82596 driver");
153 MODULE_LICENSE("GPL");
155 module_param(i596_debug, int, 0);
156 MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
159 /* Copy frames shorter than rx_copybreak, otherwise pass them up in
160 * a full-sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
162 static int rx_copybreak = 100;
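/*
 * i596_rx() below implements this: frames longer than rx_copybreak are
 * passed up in the ring skb itself (which is then replaced with a fresh
 * PKT_BUF_SZ skb), while shorter frames are copied into a new
 * pkt_len + 2 byte skb so the large receive buffer can be reused.
 */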
164 #define PKT_BUF_SZ 1536
165 #define MAX_MC_CNT 64
167 #define I596_TOTAL_SIZE 17
169 #define I596_NULL ((void *)0xffffffff)
171 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
172 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
173 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
175 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
178 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
179 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
182 #define STAT_C 0x8000 /* Set to 0 after execution */
183 #define STAT_B 0x4000 /* Command being executed */
184 #define STAT_OK 0x2000 /* Command executed ok */
185 #define STAT_A 0x1000 /* Command aborted */
187 #define CUC_START 0x0100
188 #define CUC_RESUME 0x0200
189 #define CUC_SUSPEND 0x0300
190 #define CUC_ABORT 0x0400
191 #define RX_START 0x0010
192 #define RX_RESUME 0x0020
193 #define RX_SUSPEND 0x0030
194 #define RX_ABORT 0x0040
196 #define TX_TIMEOUT (HZ/20)
200 unsigned short porthi;
201 unsigned short portlo;
206 #define SIZE_MASK 0x3fff
211 struct i596_tbd *next;
215 /* The command structure has two 'next' pointers; v_next is the address of
216 * the next command as seen by the CPU, b_next is the address of the next
217 * command as seen by the 82596. The b_next pointer, as used by the 82596,
218 * always references the status field of the next command, rather than the
219 * v_next field, because the 82596 is unaware of v_next. It may seem more
220 * logical to put v_next at the end of the structure, but we cannot do that
221 * because the 82596 expects other fields to be there, depending on command type.
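* For example, i596_add_cmd() below chains a new command for the chip with
*
*	lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
*
* i.e. b_next carries the word-swapped bus address of the status word.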
226 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
227 unsigned short status;
228 unsigned short command;
229 struct i596_cmd *b_next; /* Address from i596 viewpoint */
234 struct i596_tbd *tbd;
237 struct sk_buff *skb; /* So we can free it after tx */
242 unsigned short status;
249 char mc_addrs[MAX_MC_CNT*6];
259 char i596_config[16];
265 struct i596_rfd *b_next; /* Address from i596 viewpoint */
266 struct i596_rbd *rbd;
267 unsigned short count;
269 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
270 struct i596_rfd *v_prev;
274 unsigned short count;
275 unsigned short zero1;
276 struct i596_rbd *b_next;
277 unsigned char *b_data; /* Address from i596 viewpoint */
279 unsigned short zero2;
281 struct i596_rbd *v_next;
282 struct i596_rbd *b_addr; /* This rbd addr from i596 view */
283 unsigned char *v_data; /* Address from CPUs viewpoint */
286 #define TX_RING_SIZE 64
287 #define RX_RING_SIZE 16
290 unsigned short status;
291 unsigned short command;
292 struct i596_cmd *cmd;
293 struct i596_rfd *rfd;
294 unsigned long crc_err;
295 unsigned long align_err;
296 unsigned long resource_err;
297 unsigned long over_err;
298 unsigned long rcvdt_err;
299 unsigned long short_err;
301 unsigned short t_off;
306 struct i596_scb *scb;
310 unsigned long sysbus;
312 struct i596_iscp *iscp;
315 struct i596_private {
316 volatile struct i596_scp scp;
317 volatile struct i596_iscp iscp;
318 volatile struct i596_scb scb;
319 struct sa_cmd sa_cmd;
320 struct cf_cmd cf_cmd;
321 struct tdr_cmd tdr_cmd;
322 struct mc_cmd mc_cmd;
324 int last_restart __attribute__((aligned(4)));
325 struct i596_rfd *rfd_head;
326 struct i596_rbd *rbd_head;
327 struct i596_cmd *cmd_tail;
328 struct i596_cmd *cmd_head;
330 unsigned long last_cmd;
331 struct i596_rfd rfds[RX_RING_SIZE];
332 struct i596_rbd rbds[RX_RING_SIZE];
333 struct tx_cmd tx_cmds[TX_RING_SIZE];
334 struct i596_tbd tbds[TX_RING_SIZE];
339 static char init_setup[] =
341 0x8E, /* length, prefetch on */
342 0xC8, /* fifo to 8, monitor off */
344 0xc0, /* don't save bad frames */
346 0x80, /* don't save bad frames */
348 0x2E, /* No source address insertion, 8 byte preamble */
349 0x00, /* priority and backoff defaults */
350 0x60, /* interframe spacing */
351 0x00, /* slot time LSB */
352 0xf2, /* slot time and retries */
353 0x00, /* promiscuous mode */
354 0x00, /* collision detect */
355 0x40, /* minimum frame length */
358 0x7f /* *multi IA */ };
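/*
 * These bytes form the parameter block of the CmdConfigure command;
 * init_i596_mem() copies 14 of them into cf_cmd.i596_config and queues the
 * command. set_multicast_list() later flips bit 0x01 of byte 8
 * (promiscuous mode) and bit 0x20 of byte 11 (which, when set, disables
 * reception of all multicast frames) and re-queues CmdConfigure.
 */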
360 static int i596_open(struct net_device *dev);
361 static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
362 static irqreturn_t i596_interrupt(int irq, void *dev_id);
363 static int i596_close(struct net_device *dev);
364 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
365 static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
366 static void print_eth(unsigned char *buf, char *str);
367 static void set_multicast_list(struct net_device *dev);
369 static int rx_ring_size = RX_RING_SIZE;
370 static int ticks_limit = 25;
371 static int max_cmd_backlog = TX_RING_SIZE-1;
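/*
 * Assert Channel Attention so the 82596 (re)examines the SCB. The mechanism
 * is board specific: a register write on MVME16x, a dummy read of the base
 * address on BVME6000.
 */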
374 static inline void CA(struct net_device *dev)
376 #ifdef ENABLE_MVME16x_NET
377 if (MACH_IS_MVME16x) {
378 ((struct i596_reg *) dev->base_addr)->ca = 1;
381 #ifdef ENABLE_BVME6000_NET
382 if (MACH_IS_BVME6000) {
385 i = *(volatile u32 *) (dev->base_addr);
391 static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
393 #ifdef ENABLE_MVME16x_NET
394 if (MACH_IS_MVME16x) {
395 struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
396 p->porthi = ((c) | (u32) (x)) & 0xffff;
397 p->portlo = ((c) | (u32) (x)) >> 16;
400 #ifdef ENABLE_BVME6000_NET
401 if (MACH_IS_BVME6000) {
402 u32 v = (u32) (c) | (u32) (x);
403 v = ((u32) (v) << 16) | ((u32) (v) >> 16);
404 *(volatile u32 *) dev->base_addr = v;
406 *(volatile u32 *) dev->base_addr = v;
412 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
414 while (--delcnt && lp->iscp.stat)
417 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
418 dev->name, str, lp->scb.status, lp->scb.command);
426 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
428 while (--delcnt && lp->scb.command)
431 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
432 dev->name, str, lp->scb.status, lp->scb.command);
440 static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
442 volatile struct i596_cmd *c = cmd;
444 while (--delcnt && c->command)
447 printk(KERN_ERR "%s: %s.\n", dev->name, str);
455 static void i596_display_data(struct net_device *dev)
457 struct i596_private *lp = dev->ml_priv;
458 struct i596_cmd *cmd;
459 struct i596_rfd *rfd;
460 struct i596_rbd *rbd;
462 printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
463 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
464 printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
465 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
466 printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
467 " .cmd = %p, .rfd = %p\n",
468 &lp->scb, lp->scb.status, lp->scb.command,
469 lp->scb.cmd, lp->scb.rfd);
470 printk(KERN_ERR " errors: crc %lx, align %lx, resource %lx,"
471 " over %lx, rcvdt %lx, short %lx\n",
472 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
473 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
475 while (cmd != I596_NULL) {
476 printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
477 cmd, cmd->status, cmd->command, cmd->b_next);
481 printk(KERN_ERR "rfd_head = %p\n", rfd);
483 printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
485 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
488 } while (rfd != lp->rfd_head);
490 printk(KERN_ERR "rbd_head = %p\n", rbd);
492 printk(KERN_ERR " %p .count %04x, b_next %p, b_data %p, size %04x\n",
493 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
495 } while (rbd != lp->rbd_head);
499 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
500 static irqreturn_t i596_error(int irq, void *dev_id)
502 struct net_device *dev = dev_id;
503 #ifdef ENABLE_MVME16x_NET
504 if (MACH_IS_MVME16x) {
505 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
511 #ifdef ENABLE_BVME6000_NET
512 if (MACH_IS_BVME6000) {
513 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
519 printk(KERN_ERR "%s: Error interrupt\n", dev->name);
520 i596_display_data(dev);
525 static inline void remove_rx_bufs(struct net_device *dev)
527 struct i596_private *lp = dev->ml_priv;
528 struct i596_rbd *rbd;
531 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
532 if (rbd->skb == NULL)
534 dev_kfree_skb(rbd->skb);
539 static inline int init_rx_bufs(struct net_device *dev)
541 struct i596_private *lp = dev->ml_priv;
543 struct i596_rfd *rfd;
544 struct i596_rbd *rbd;
546 /* First build the Receive Buffer Descriptor List */
548 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
549 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
557 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
558 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
560 rbd->v_data = skb->data;
561 rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
562 rbd->size = PKT_BUF_SZ;
564 cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
567 lp->rbd_head = lp->rbds;
568 rbd = lp->rbds + rx_ring_size - 1;
569 rbd->v_next = lp->rbds;
570 rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));
572 /* Now build the Receive Frame Descriptor List */
574 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
575 rfd->rbd = I596_NULL;
578 rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
581 lp->rfd_head = lp->rfds;
582 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
584 rfd->rbd = lp->rbd_head;
585 rfd->v_prev = lp->rfds + rx_ring_size - 1;
586 rfd = lp->rfds + rx_ring_size - 1;
587 rfd->v_next = lp->rfds;
588 rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
589 rfd->cmd = CMD_EOL|CMD_FLEX;
595 static void rebuild_rx_bufs(struct net_device *dev)
597 struct i596_private *lp = dev->ml_priv;
600 /* Ensure rx frame/buffer descriptors are tidy */
602 for (i = 0; i < rx_ring_size; i++) {
603 lp->rfds[i].rbd = I596_NULL;
604 lp->rfds[i].cmd = CMD_FLEX;
606 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
607 lp->rfd_head = lp->rfds;
608 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
609 lp->rbd_head = lp->rbds;
610 lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
614 static int init_i596_mem(struct net_device *dev)
616 struct i596_private *lp = dev->ml_priv;
619 MPU_PORT(dev, PORT_RESET, NULL);
621 udelay(100); /* Wait 100us - seems to help */
623 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
624 #ifdef ENABLE_MVME16x_NET
625 if (MACH_IS_MVME16x) {
626 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
628 /* Disable all ints for now */
631 /* Following disables snooping. Snooping is not required
632 * as we make appropriate use of non-cached pages for
633 * shared data, and cache_push/cache_clear.
638 #ifdef ENABLE_BVME6000_NET
639 if (MACH_IS_BVME6000) {
640 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
646 /* change the scp address */
648 MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
652 lp->last_cmd = jiffies;
654 #ifdef ENABLE_MVME16x_NET
656 lp->scp.sysbus = 0x00000054;
658 #ifdef ENABLE_BVME6000_NET
659 if (MACH_IS_BVME6000)
660 lp->scp.sysbus = 0x0000004c;
663 lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
664 lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
665 lp->iscp.stat = ISCP_BUSY;
668 lp->cmd_head = lp->scb.cmd = I596_NULL;
670 #ifdef ENABLE_BVME6000_NET
671 if (MACH_IS_BVME6000) {
672 lp->scb.t_on = 7 * 25;
673 lp->scb.t_off = 1 * 25;
677 DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
681 if (wait_istat(dev,lp,1000,"initialization timed out"))
683 DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));
685 /* Ensure rx frame/buffer descriptors are tidy */
686 rebuild_rx_bufs(dev);
689 #ifdef ENABLE_MVME16x_NET
690 if (MACH_IS_MVME16x) {
691 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
693 /* Enable ints, etc. now */
694 pcc2[0x2a] = 0x55; /* Edge sensitive */
698 #ifdef ENABLE_BVME6000_NET
699 if (MACH_IS_BVME6000) {
700 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
707 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
708 memcpy(lp->cf_cmd.i596_config, init_setup, 14);
709 lp->cf_cmd.cmd.command = CmdConfigure;
710 i596_add_cmd(dev, &lp->cf_cmd.cmd);
712 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
713 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
714 lp->sa_cmd.cmd.command = CmdSASetup;
715 i596_add_cmd(dev, &lp->sa_cmd.cmd);
717 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
718 lp->tdr_cmd.cmd.command = CmdTDR;
719 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
721 spin_lock_irqsave (&lp->lock, flags);
723 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
724 spin_unlock_irqrestore (&lp->lock, flags);
727 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
728 lp->scb.command = RX_START;
731 spin_unlock_irqrestore (&lp->lock, flags);
733 if (wait_cmd(dev,lp,1000,"RX_START not processed"))
735 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
739 printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
740 MPU_PORT(dev, PORT_RESET, NULL);
744 static inline int i596_rx(struct net_device *dev)
746 struct i596_private *lp = dev->ml_priv;
747 struct i596_rfd *rfd;
748 struct i596_rbd *rbd;
751 DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
752 lp->rfd_head, lp->rbd_head));
754 rfd = lp->rfd_head; /* Ref next frame to check */
756 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
757 if (rfd->rbd == I596_NULL)
759 else if (rfd->rbd == lp->rbd_head->b_addr)
762 printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
766 DEB(DEB_RXFRAME, printk(KERN_DEBUG " rfd %p, rfd.rbd %p, rfd.stat %04x\n",
767 rfd, rfd->rbd, rfd->stat));
769 if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
771 int pkt_len = rbd->count & 0x3fff;
772 struct sk_buff *skb = rbd->skb;
775 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
778 /* Check if the packet is long enough to just accept
779 * without copying to a properly sized skbuff.
782 if (pkt_len > rx_copybreak) {
783 struct sk_buff *newskb;
785 /* Get fresh skbuff to replace filled one. */
786 newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
787 if (newskb == NULL) {
788 skb = NULL; /* drop pkt */
791 /* Pass up the skb already on the Rx ring. */
792 skb_put(skb, pkt_len);
795 rbd->v_data = newskb->data;
796 rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
798 cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
801 skb = netdev_alloc_skb(dev, pkt_len + 2);
805 /* XXX tulip.c can defer packets here!! */
806 dev->stats.rx_dropped++;
809 /* 16 byte align the data fields */
811 skb_put_data(skb, rbd->v_data,
814 skb->protocol=eth_type_trans(skb,dev);
817 cache_clear(virt_to_phys(rbd->skb->data),
821 dev->stats.rx_packets++;
822 dev->stats.rx_bytes+=pkt_len;
826 DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
827 dev->name, rfd->stat));
828 dev->stats.rx_errors++;
829 if ((rfd->stat) & 0x0001)
830 dev->stats.collisions++;
831 if ((rfd->stat) & 0x0080)
832 dev->stats.rx_length_errors++;
833 if ((rfd->stat) & 0x0100)
834 dev->stats.rx_over_errors++;
835 if ((rfd->stat) & 0x0200)
836 dev->stats.rx_fifo_errors++;
837 if ((rfd->stat) & 0x0400)
838 dev->stats.rx_frame_errors++;
839 if ((rfd->stat) & 0x0800)
840 dev->stats.rx_crc_errors++;
841 if ((rfd->stat) & 0x1000)
842 dev->stats.rx_length_errors++;
845 /* Clear the buffer descriptor count and EOF + F flags */
847 if (rbd != I596_NULL && (rbd->count & 0x4000)) {
849 lp->rbd_head = rbd->v_next;
852 /* Tidy the frame descriptor, marking it as end of list */
854 rfd->rbd = I596_NULL;
856 rfd->cmd = CMD_EOL|CMD_FLEX;
859 /* Remove end-of-list from old end descriptor */
861 rfd->v_prev->cmd = CMD_FLEX;
863 /* Update record of next frame descriptor to process */
865 lp->scb.rfd = rfd->b_next;
866 lp->rfd_head = rfd->v_next;
870 DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));
876 static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
878 struct i596_cmd *ptr;
880 while (lp->cmd_head != I596_NULL) {
882 lp->cmd_head = ptr->v_next;
885 switch ((ptr->command) & 0x7) {
888 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
889 struct sk_buff *skb = tx_cmd->skb;
893 dev->stats.tx_errors++;
894 dev->stats.tx_aborted_errors++;
896 ptr->v_next = ptr->b_next = I596_NULL;
897 tx_cmd->cmd.command = 0; /* Mark as free */
901 ptr->v_next = ptr->b_next = I596_NULL;
905 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
906 lp->scb.cmd = I596_NULL;
909 static void i596_reset(struct net_device *dev, struct i596_private *lp,
914 DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));
916 spin_lock_irqsave (&lp->lock, flags);
918 wait_cmd(dev,lp,100,"i596_reset timed out");
920 netif_stop_queue(dev);
922 lp->scb.command = CUC_ABORT | RX_ABORT;
925 /* wait for shutdown */
926 wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
927 spin_unlock_irqrestore (&lp->lock, flags);
929 i596_cleanup_cmd(dev,lp);
932 netif_start_queue(dev);
936 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
938 struct i596_private *lp = dev->ml_priv;
939 int ioaddr = dev->base_addr;
942 DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));
945 cmd->command |= (CMD_EOL | CMD_INTR);
946 cmd->v_next = cmd->b_next = I596_NULL;
948 spin_lock_irqsave (&lp->lock, flags);
950 if (lp->cmd_head != I596_NULL) {
951 lp->cmd_tail->v_next = cmd;
952 lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
955 wait_cmd(dev,lp,100,"i596_add_cmd timed out");
956 lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
957 lp->scb.command = CUC_START;
963 spin_unlock_irqrestore (&lp->lock, flags);
965 if (lp->cmd_backlog > max_cmd_backlog) {
966 unsigned long tickssofar = jiffies - lp->last_cmd;
968 if (tickssofar < ticks_limit)
971 printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);
973 i596_reset(dev, lp, ioaddr);
977 static int i596_open(struct net_device *dev)
981 DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
983 if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
984 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
987 #ifdef ENABLE_MVME16x_NET
988 if (MACH_IS_MVME16x) {
989 if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
995 res = init_rx_bufs(dev);
999 netif_start_queue(dev);
1001 if (init_i596_mem(dev)) {
1009 netif_stop_queue(dev);
1010 remove_rx_bufs(dev);
1012 #ifdef ENABLE_MVME16x_NET
1013 free_irq(0x56, dev);
1016 free_irq(dev->irq, dev);
1021 static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
1023 struct i596_private *lp = dev->ml_priv;
1024 int ioaddr = dev->base_addr;
1026 /* Transmitter timeout, serious problems. */
1027 DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
1030 dev->stats.tx_errors++;
1032 /* Try to restart the adaptor */
1033 if (lp->last_restart == dev->stats.tx_packets) {
1034 DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
1035 /* Shutdown and restart */
1036 i596_reset (dev, lp, ioaddr);
1038 /* Issue a channel attention signal */
1039 DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
1040 lp->scb.command = CUC_START | RX_START;
1042 lp->last_restart = dev->stats.tx_packets;
1045 netif_trans_update(dev); /* prevent tx timeout */
1046 netif_wake_queue (dev);
1049 static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1051 struct i596_private *lp = dev->ml_priv;
1052 struct tx_cmd *tx_cmd;
1053 struct i596_tbd *tbd;
1054 short length = skb->len;
1056 DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
1057 dev->name, skb->len, skb->data));
1059 if (skb->len < ETH_ZLEN) {
1060 if (skb_padto(skb, ETH_ZLEN))
1061 return NETDEV_TX_OK;
1064 netif_stop_queue(dev);
1066 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1067 tbd = lp->tbds + lp->next_tx_cmd;
1069 if (tx_cmd->cmd.command) {
1070 printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
1072 dev->stats.tx_dropped++;
1076 if (++lp->next_tx_cmd == TX_RING_SIZE)
1077 lp->next_tx_cmd = 0;
1078 tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
1079 tbd->next = I596_NULL;
1081 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
1087 tbd->size = EOF | length;
1089 tbd->data = WSWAPchar(virt_to_bus(skb->data));
1092 cache_push(virt_to_phys(skb->data), length);
1094 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
1095 i596_add_cmd(dev, &tx_cmd->cmd);
1097 dev->stats.tx_packets++;
1098 dev->stats.tx_bytes += length;
1101 netif_start_queue(dev);
1103 return NETDEV_TX_OK;
1106 static void print_eth(unsigned char *add, char *str)
1108 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1109 add, add + 6, add, add[12], add[13], str);
1112 static const struct net_device_ops i596_netdev_ops = {
1113 .ndo_open = i596_open,
1114 .ndo_stop = i596_close,
1115 .ndo_start_xmit = i596_start_xmit,
1116 .ndo_set_rx_mode = set_multicast_list,
1117 .ndo_tx_timeout = i596_tx_timeout,
1118 .ndo_set_mac_address = eth_mac_addr,
1119 .ndo_validate_addr = eth_validate_addr,
1122 static struct net_device * __init i82596_probe(void)
1124 struct net_device *dev;
1126 struct i596_private *lp;
1132 return ERR_PTR(-ENODEV);
1135 dev = alloc_etherdev(0);
1137 return ERR_PTR(-ENOMEM);
1139 #ifdef ENABLE_MVME16x_NET
1140 if (MACH_IS_MVME16x) {
1141 if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
1142 printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
1146 memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
1147 dev->base_addr = MVME_I596_BASE;
1148 dev->irq = (unsigned) MVME16x_IRQ_I596;
1152 #ifdef ENABLE_BVME6000_NET
1153 if (MACH_IS_BVME6000) {
1154 volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
1155 unsigned char msr = rtc[3];
1159 for (i = 0; i < 6; i++)
1160 eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */
1162 dev->base_addr = BVME_I596_BASE;
1163 dev->irq = (unsigned) BVME_IRQ_I596;
1171 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
1172 if (!dev->mem_start) {
1177 DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1179 for (i = 0; i < 6; i++)
1180 DEB(DEB_PROBE,printk(" %2.2X", eth_addr[i]));
1181 eth_hw_addr_set(dev, eth_addr);
1183 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1185 DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
1187 /* The 82596-specific entries in the device structure. */
1188 dev->netdev_ops = &i596_netdev_ops;
1189 dev->watchdog_timeo = TX_TIMEOUT;
1191 dev->ml_priv = (void *)(dev->mem_start);
1194 DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
1195 "lp->scb at 0x%08lx\n",
1196 dev->name, (unsigned long)lp,
1197 sizeof(struct i596_private), (unsigned long)&lp->scb));
1198 memset((void *) lp, 0, sizeof(struct i596_private));
1201 cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
1202 cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
1203 kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
1205 lp->scb.command = 0;
1206 lp->scb.cmd = I596_NULL;
1207 lp->scb.rfd = I596_NULL;
1208 spin_lock_init(&lp->lock);
1210 err = register_netdev(dev);
1216 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
1217 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
1219 kernel_set_cachemode((void *)(dev->mem_start), 4096,
1220 IOMAP_FULL_CACHING);
1222 free_page ((u32)(dev->mem_start));
1226 return ERR_PTR(err);
1229 static irqreturn_t i596_interrupt(int irq, void *dev_id)
1231 struct net_device *dev = dev_id;
1232 struct i596_private *lp;
1234 unsigned short status, ack_cmd = 0;
1237 #ifdef ENABLE_BVME6000_NET
1238 if (MACH_IS_BVME6000) {
1239 if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
1240 i596_error(irq, dev_id);
1246 printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
1250 ioaddr = dev->base_addr;
1253 spin_lock (&lp->lock);
1255 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1256 status = lp->scb.status;
1258 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1259 dev->name, irq, status));
1261 ack_cmd = status & 0xf000;
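/*
 * The four high status bits are acknowledged back to the chip: 0x8000
 * (command completed), 0x2000 (command unit went inactive), 0x4000 (frame
 * received) and 0x1000 (receive unit not ready); each case is handled
 * below.
 */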
1263 if ((status & 0x8000) || (status & 0x2000)) {
1264 struct i596_cmd *ptr;
1267 if ((status & 0x8000))
1268 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
1269 if ((status & 0x2000))
1270 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
1272 while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
1275 DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
1276 lp->cmd_head->status, lp->cmd_head->command));
1277 lp->cmd_head = ptr->v_next;
1280 switch ((ptr->command) & 0x7) {
1283 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1284 struct sk_buff *skb = tx_cmd->skb;
1286 if ((ptr->status) & STAT_OK) {
1287 DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
1289 dev->stats.tx_errors++;
1290 if ((ptr->status) & 0x0020)
1291 dev->stats.collisions++;
1292 if (!((ptr->status) & 0x0040))
1293 dev->stats.tx_heartbeat_errors++;
1294 if ((ptr->status) & 0x0400)
1295 dev->stats.tx_carrier_errors++;
1296 if ((ptr->status) & 0x0800)
1297 dev->stats.collisions++;
1298 if ((ptr->status) & 0x1000)
1299 dev->stats.tx_aborted_errors++;
1302 dev_consume_skb_irq(skb);
1304 tx_cmd->cmd.command = 0; /* Mark free */
1309 unsigned short status = ((struct tdr_cmd *)ptr)->status;
1311 if (status & 0x8000) {
1312 DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
1314 if (status & 0x4000)
1315 printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
1316 if (status & 0x2000)
1317 printk(KERN_ERR "%s: Termination problem.\n", dev->name);
1318 if (status & 0x1000)
1319 printk(KERN_ERR "%s: Short circuit.\n", dev->name);
1321 DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
1326 case CmdMulticastList:
1327 /* Zap command so set_multicast_list() knows it is free */
1331 ptr->v_next = ptr->b_next = I596_NULL;
1332 lp->last_cmd = jiffies;
1336 while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
1337 ptr->command &= 0x1fff;
1341 if ((lp->cmd_head != I596_NULL))
1342 ack_cmd |= CUC_START;
1343 lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
1345 if ((status & 0x1000) || (status & 0x4000)) {
1346 if ((status & 0x4000))
1347 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
1349 /* Only RX_START if stopped - RGH 07-07-96 */
1350 if (status & 0x1000) {
1351 if (netif_running(dev)) {
1352 DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
1353 ack_cmd |= RX_START;
1354 dev->stats.rx_errors++;
1355 dev->stats.rx_fifo_errors++;
1356 rebuild_rx_bufs(dev);
1360 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1361 lp->scb.command = ack_cmd;
1363 #ifdef ENABLE_MVME16x_NET
1364 if (MACH_IS_MVME16x) {
1365 /* Ack the interrupt */
1367 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
1372 #ifdef ENABLE_BVME6000_NET
1373 if (MACH_IS_BVME6000) {
1374 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
1382 DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
1384 spin_unlock (&lp->lock);
1385 return IRQ_RETVAL(handled);
1388 static int i596_close(struct net_device *dev)
1390 struct i596_private *lp = dev->ml_priv;
1391 unsigned long flags;
1393 netif_stop_queue(dev);
1395 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1396 dev->name, lp->scb.status));
1398 spin_lock_irqsave(&lp->lock, flags);
1400 wait_cmd(dev,lp,100,"close1 timed out");
1401 lp->scb.command = CUC_ABORT | RX_ABORT;
1404 wait_cmd(dev,lp,100,"close2 timed out");
1406 spin_unlock_irqrestore(&lp->lock, flags);
1407 DEB(DEB_STRUCT,i596_display_data(dev));
1408 i596_cleanup_cmd(dev,lp);
1410 #ifdef ENABLE_MVME16x_NET
1411 if (MACH_IS_MVME16x) {
1412 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
1414 /* Disable all ints */
1417 pcc2[0x2b] = 0x40; /* Set snooping bits now! */
1420 #ifdef ENABLE_BVME6000_NET
1421 if (MACH_IS_BVME6000) {
1422 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
1428 #ifdef ENABLE_MVME16x_NET
1429 free_irq(0x56, dev);
1431 free_irq(dev->irq, dev);
1432 remove_rx_bufs(dev);
1438 * Set or clear the multicast filter for this adaptor.
1441 static void set_multicast_list(struct net_device *dev)
1443 struct i596_private *lp = dev->ml_priv;
1444 int config = 0, cnt;
1446 DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1447 dev->name, netdev_mc_count(dev),
1448 dev->flags & IFF_PROMISC ? "ON" : "OFF",
1449 dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1451 if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
1454 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1455 lp->cf_cmd.i596_config[8] |= 0x01;
1458 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1459 lp->cf_cmd.i596_config[8] &= ~0x01;
1462 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1463 lp->cf_cmd.i596_config[11] &= ~0x20;
1466 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1467 lp->cf_cmd.i596_config[11] |= 0x20;
1471 lp->cf_cmd.cmd.command = CmdConfigure;
1472 i596_add_cmd(dev, &lp->cf_cmd.cmd);
1475 cnt = netdev_mc_count(dev);
1476 if (cnt > MAX_MC_CNT)
1479 printk(KERN_ERR "%s: Only %d multicast addresses supported\n",
1483 if (!netdev_mc_empty(dev)) {
1484 struct netdev_hw_addr *ha;
1488 if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
1491 cmd->cmd.command = CmdMulticastList;
1492 cmd->mc_cnt = cnt * ETH_ALEN;
1494 netdev_for_each_mc_addr(ha, dev) {
1497 memcpy(cp, ha->addr, ETH_ALEN);
1499 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
1503 i596_add_cmd(dev, &cmd->cmd);
1507 static struct net_device *dev_82596;
1509 static int debug = -1;
1510 module_param(debug, int, 0);
1511 MODULE_PARM_DESC(debug, "i82596 debug mask");
1513 static int __init i82596_init(void)
1517 dev_82596 = i82596_probe();
1518 return PTR_ERR_OR_ZERO(dev_82596);
1520 module_init(i82596_init);
1522 static void __exit i82596_cleanup(void)
1524 unregister_netdev(dev_82596);
1526 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
1527 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
1530 kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
1531 IOMAP_FULL_CACHING);
1533 free_page ((u32)(dev_82596->mem_start));
1534 free_netdev(dev_82596);
1536 module_exit(i82596_cleanup);