2 * drivers/net/ethernet/ibm/emac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
42 #include <linux/of_address.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_net.h>
45 #include <linux/slab.h>
47 #include <asm/processor.h>
50 #include <asm/uaccess.h>
52 #include <asm/dcr-regs.h>
57 * Lack of dma_unmap_???? calls is intentional.
59 * API-correct usage requires additional support state information to be
60 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
61 * EMAC design (e.g. TX buffer passed from network stack can be split into
62 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
63 * maintaining such information will add additional overhead.
64 * Current DMA API implementation for 4xx processors only ensures cache coherency
65 * and dma_unmap_???? routines are empty and are likely to stay this way.
66 * I decided to omit dma_unmap_??? calls because I don't want to add additional
67 * complexity just for the sake of following some abstract API, when it doesn't
68 * add any real benefit to the driver. I understand that this decision maybe
69 * controversial, but I really tried to make code API-correct and efficient
70 * at the same time and didn't come up with code I liked :(. --ebs
/* Module identity, driver tunables, and global probe-ordering state.
 * NOTE(review): this extract has dropped lines (e.g. the MODULE_AUTHOR
 * macro name before the author string at "79") -- confirm against the
 * complete source before editing.
 */
73 #define DRV_NAME "emac"
74 #define DRV_VERSION "3.54"
75 #define DRV_DESC "PPC 4xx OCP EMAC driver"
77 MODULE_DESCRIPTION(DRV_DESC);
79 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
80 MODULE_LICENSE("GPL");
82 /* minimum number of free TX descriptors required to wake up TX process */
83 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
85 /* If packet size is less than this number, we allocate small skb and copy packet
86 * contents into it instead of just sending original big skb up
88 #define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
90 /* Since multiple EMACs share MDIO lines in various ways, we need
91 * to avoid re-using the same PHY ID in cases where the arch didn't
92 * setup precise phy_map entries
94 * XXX This is something that needs to be reworked as we can have multiple
95 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
96 * probably require in that case to have explicit PHY IDs in the device-tree
/* Bitmap of PHY addresses already claimed by an EMAC; guarded by the
 * mutex below so concurrent probes never pick the same address. */
98 static u32 busy_phy_map;
99 static DEFINE_MUTEX(emac_phy_map_lock);
101 /* This is the wait queue used to wait on any event related to probe, that
102 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
104 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
106 /* Having stable interface names is a doomed idea. However, it would be nice
107 * if we didn't have completely random interface names at boot too :-) It's
108 * just a matter of making everybody's life easier. Since we are doing
109 * threaded probing, it's a bit harder though. The base idea here is that
110 * we make up a list of all emacs in the device-tree before we register the
111 * driver. Every emac will then wait for the previous one in the list to
112 * initialize before itself. We should also keep that list ordered by
114 * That list is only 4 entries long, meaning that additional EMACs don't
115 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
118 #define EMAC_BOOT_LIST_SIZE 4
119 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
121 /* How long should I wait for dependent devices ? */
122 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
124 /* I don't want to litter system log with timeout errors
125 * when we have brain-damaged PHY.
/* Report a register-poll timeout: demoted to a debug message on chips
 * with a known PHY clock erratum (440GX/460EX/440EP), otherwise printed
 * at KERN_ERR under net_ratelimit() to avoid log flooding.
 * NOTE(review): the second parameter line (the "error" string) and the
 * closing of the printk/function are missing from this extract. */
127 static inline void emac_report_timeout_error(struct emac_instance *dev,
130 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
131 EMAC_FTR_460EX_PHY_CLK_FIX |
132 EMAC_FTR_440EP_PHY_CLK_FIX))
133 DBG(dev, "%s" NL, error);
134 else if (net_ratelimit())
135 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
139 /* EMAC PHY clock workaround:
140 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141 * which allows controlling each EMAC clock
/* Switch this EMAC's RX clock to the TX clock via SDR0_MFR (sets the
 * per-EMAC ECS bit); only applies on parts with the 440EP clock fix. */
143 static inline void emac_rx_clk_tx(struct emac_instance *dev)
145 #ifdef CONFIG_PPC_DCR_NATIVE
146 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
147 dcri_clrset(SDR0, SDR0_MFR,
148 0, SDR0_MFR_ECS >> dev->cell_index);
/* Restore the default RX clock source (clears the per-EMAC ECS bit in
 * SDR0_MFR); inverse of emac_rx_clk_tx(). */
152 static inline void emac_rx_clk_default(struct emac_instance *dev)
154 #ifdef CONFIG_PPC_DCR_NATIVE
155 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
156 dcri_clrset(SDR0, SDR0_MFR,
157 SDR0_MFR_ECS >> dev->cell_index, 0);
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON HZ
163 #define PHY_POLL_LINK_OFF (HZ / 5)
165 /* Graceful stop timeouts in us.
166 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
/* Per-speed stop timeouts in microseconds: slower links need longer to
 * drain one in-flight frame. */
168 #define STOP_TIMEOUT_10 1230
169 #define STOP_TIMEOUT_100 124
170 #define STOP_TIMEOUT_1000 13
171 #define STOP_TIMEOUT_1000_JUMBO 73
/* 01:80:C2:00:00:01 -- the IEEE 802.3x PAUSE multicast address,
 * subscribed in emac_open() so flow-control frames are received. */
173 static unsigned char default_mcast_addr[] = {
174 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
177 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
178 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190 "tx_bd_excessive_collisions", "tx_bd_late_collision",
191 "tx_bd_multple_collisions", "tx_bd_single_collision",
192 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
/* Forward declarations for routines referenced before their definition. */
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
/* Return nonzero if the given PHY interface mode can run gigabit
 * (GMII/RGMII/SGMII/TBI/RTBI). */
200 static inline int emac_phy_supports_gige(int phy_mode)
202 return phy_mode == PHY_MODE_GMII ||
203 phy_mode == PHY_MODE_RGMII ||
204 phy_mode == PHY_MODE_SGMII ||
205 phy_mode == PHY_MODE_TBI ||
206 phy_mode == PHY_MODE_RTBI;
/* Return nonzero if the PHY mode uses the internal GPCS
 * (gigabit physical coding sublayer): SGMII/TBI/RTBI. */
209 static inline int emac_phy_gpcs(int phy_mode)
211 return phy_mode == PHY_MODE_SGMII ||
212 phy_mode == PHY_MODE_TBI ||
213 phy_mode == PHY_MODE_RTBI;
/* Enable the transmitter by setting MR0[TXE], read-modify-write so the
 * register is only touched when the bit is actually clear. */
216 static inline void emac_tx_enable(struct emac_instance *dev)
218 struct emac_regs __iomem *p = dev->emacp;
221 DBG(dev, "tx_enable" NL);
223 r = in_be32(&p->mr0);
224 if (!(r & EMAC_MR0_TXE))
225 out_be32(&p->mr0, r | EMAC_MR0_TXE);
/* Disable the transmitter: clear MR0[TXE], then busy-wait (bounded by
 * dev->stop_timeout, in us) for the hardware to acknowledge the stop by
 * raising MR0[TXI]. On timeout the erratum-aware error reporter is used.
 * NOTE(review): the udelay/decrement inside the poll loop is missing
 * from this extract. */
228 static void emac_tx_disable(struct emac_instance *dev)
230 struct emac_regs __iomem *p = dev->emacp;
233 DBG(dev, "tx_disable" NL);
235 r = in_be32(&p->mr0);
236 if (r & EMAC_MR0_TXE) {
237 int n = dev->stop_timeout;
238 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
239 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
244 emac_report_timeout_error(dev, "TX disable timeout");
/* Enable the receiver (MR0[RXE]). Skipped entirely if the MAL has the RX
 * channel stopped. If a previous asynchronous disable is still pending
 * (RXI not yet set), wait for it to complete before re-enabling. */
248 static void emac_rx_enable(struct emac_instance *dev)
250 struct emac_regs __iomem *p = dev->emacp;
253 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
256 DBG(dev, "rx_enable" NL);
258 r = in_be32(&p->mr0);
259 if (!(r & EMAC_MR0_RXE)) {
260 if (unlikely(!(r & EMAC_MR0_RXI))) {
261 /* Wait if previous async disable is still in progress */
262 int n = dev->stop_timeout;
263 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
268 emac_report_timeout_error(dev,
269 "RX disable timeout");
271 out_be32(&p->mr0, r | EMAC_MR0_RXE);
/* Synchronously disable the receiver: clear MR0[RXE] and poll (bounded
 * by dev->stop_timeout us) for MR0[RXI] to confirm the channel stopped. */
277 static void emac_rx_disable(struct emac_instance *dev)
279 struct emac_regs __iomem *p = dev->emacp;
282 DBG(dev, "rx_disable" NL);
284 r = in_be32(&p->mr0);
285 if (r & EMAC_MR0_RXE) {
286 int n = dev->stop_timeout;
287 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
288 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
293 emac_report_timeout_error(dev, "RX disable timeout");
/* Quiesce the net interface: under the tx/addr locks mark the device
 * stopped, reset the TX watchdog timestamp, disable NAPI polling for
 * this commac and stop the TX queue. */
297 static inline void emac_netif_stop(struct emac_instance *dev)
299 netif_tx_lock_bh(dev->ndev);
300 netif_addr_lock(dev->ndev);
302 netif_addr_unlock(dev->ndev);
303 netif_tx_unlock_bh(dev->ndev);
304 netif_trans_update(dev->ndev); /* prevent tx timeout */
305 mal_poll_disable(dev->mal, &dev->commac);
306 netif_tx_disable(dev->ndev);
/* Re-start the net interface after emac_netif_stop(): flush any multicast
 * update that was deferred while stopped, wake the TX queue and re-enable
 * NAPI polling. */
309 static inline void emac_netif_start(struct emac_instance *dev)
311 netif_tx_lock_bh(dev->ndev);
312 netif_addr_lock(dev->ndev);
314 if (dev->mcast_pending && netif_running(dev->ndev))
315 __emac_set_multicast_list(dev);
316 netif_addr_unlock(dev->ndev);
317 netif_tx_unlock_bh(dev->ndev);
319 netif_wake_queue(dev->ndev);
321 /* NOTE: unconditional netif_wake_queue is only appropriate
322 * so long as all callers are assured to have free tx slots
323 * (taken from tg3... though the case where that is wrong is
324 * not terribly harmful)
326 mal_poll_enable(dev->mal, &dev->commac);
/* Fire-and-forget RX disable: clear MR0[RXE] without waiting for RXI.
 * emac_rx_enable() handles the case where this is still in flight. */
329 static inline void emac_rx_disable_async(struct emac_instance *dev)
331 struct emac_regs __iomem *p = dev->emacp;
334 DBG(dev, "rx_disable_async" NL);
336 r = in_be32(&p->mr0);
337 if (r & EMAC_MR0_RXE)
338 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
/* Soft-reset the EMAC core via MR0[SRST], polling for the bit to clear.
 * Handles two errata: 40x parts want RX/TX stopped before reset, and
 * 460EX/GT needs a TX clock from the PHY -- if the first reset attempt
 * times out (e.g. AR8035 PHY with no cable), retry on the internal clock
 * and switch back to the external clock afterwards.
 * Returns 0 on success; sets dev->reset_failed and reports on timeout.
 * NOTE(review): the loop counter init ("int n = ...") and several
 * closing braces are missing from this extract. */
341 static int emac_reset(struct emac_instance *dev)
343 struct emac_regs __iomem *p = dev->emacp;
345 bool __maybe_unused try_internal_clock = false;
347 DBG(dev, "reset" NL);
349 if (!dev->reset_failed) {
350 /* 40x erratum suggests stopping RX channel before reset,
353 emac_rx_disable(dev);
354 emac_tx_disable(dev);
357 #ifdef CONFIG_PPC_DCR_NATIVE
360 * PPC460EX/GT Embedded Processor Advanced User's Manual
361 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
362 * Note: The PHY must provide a TX Clk in order to perform a soft reset
363 * of the EMAC. If none is present, select the internal clock
364 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
365 * After a soft reset, select the external clock.
367 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
368 * ethernet cable is not attached. This causes the reset to timeout
369 * and the PHY detection code in emac_init_phy() is unable to
370 * communicate and detect the AR8035-A PHY. As a result, the emac
371 * driver bails out early and the user has no ethernet.
372 * In order to stay compatible with existing configurations, the
373 * driver will temporarily switch to the internal clock, after
374 * the first reset fails.
376 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
377 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
378 dev->phy_map == 0xffffffff)) {
379 /* No PHY: select internal loop clock before reset */
380 dcri_clrset(SDR0, SDR0_ETH_CFG,
381 0, SDR0_ETH_CFG_ECS << dev->cell_index);
383 /* PHY present: select external clock before reset */
384 dcri_clrset(SDR0, SDR0_ETH_CFG,
385 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
390 out_be32(&p->mr0, EMAC_MR0_SRST);
391 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
394 #ifdef CONFIG_PPC_DCR_NATIVE
395 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
396 if (!n && !try_internal_clock) {
397 /* first attempt has timed out. */
399 try_internal_clock = true;
403 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
404 dev->phy_map == 0xffffffff)) {
405 /* No PHY: restore external clock source after reset */
406 dcri_clrset(SDR0, SDR0_ETH_CFG,
407 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
413 dev->reset_failed = 0;
416 emac_report_timeout_error(dev, "reset timeout");
417 dev->reset_failed = 1;
/* Program the group-address hash table (GAHT): CRC each subscribed
 * multicast address into a slot, accumulate the per-register bitmasks in
 * a temporary array, then write all GAHT registers in one pass. */
422 static void emac_hash_mc(struct emac_instance *dev)
424 const int regs = EMAC_XAHT_REGS(dev);
425 u32 *gaht_base = emac_gaht_base(dev);
427 struct netdev_hw_addr *ha;
430 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
432 memset(gaht_temp, 0, sizeof (gaht_temp));
434 netdev_for_each_mc_addr(ha, dev->ndev) {
436 DBG2(dev, "mc %pM" NL, ha->addr);
438 slot = EMAC_XAHT_CRC_TO_SLOT(dev,
439 ether_crc(ETH_ALEN, ha->addr));
440 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
441 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
443 gaht_temp[reg] |= mask;
446 for (i = 0; i < regs; i++)
447 out_be32(gaht_base + i, gaht_temp[i]);
/* Translate the net_device IFF_* flags into an EMAC RX mode register
 * (RMR) value: base bits plus promiscuous / all-multicast / hash-match
 * depending on the flags and multicast-list size; on APM821xx also
 * encode the maximum jumbo frame size from the MTU.
 * NOTE(review): the flag bits OR'ed in the IFF_PROMISC/ALLMULTI branches
 * are missing from this extract. */
450 static inline u32 emac_iff2rmr(struct net_device *ndev)
452 struct emac_instance *dev = netdev_priv(ndev);
455 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
457 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
462 if (ndev->flags & IFF_PROMISC)
464 else if (ndev->flags & IFF_ALLMULTI ||
465 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
467 else if (!netdev_mc_empty(ndev))
470 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
471 r &= ~EMAC4_RMR_MJS_MASK;
472 r |= EMAC4_RMR_MJS(ndev->mtu);
/* Compute base MR1 bits for the classic (non-EMAC4) core from the TX/RX
 * FIFO sizes; unknown sizes fall through to a warning.
 * NOTE(review): the switch statements and several case labels are
 * missing from this extract. */
478 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
480 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
482 DBG2(dev, "__emac_calc_base_mr1" NL);
486 ret |= EMAC_MR1_TFS_2K;
489 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
490 dev->ndev->name, tx_size);
495 ret |= EMAC_MR1_RFS_16K;
498 ret |= EMAC_MR1_RFS_4K;
501 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
502 dev->ndev->name, rx_size);
/* Compute base MR1 bits for the EMAC4 core, including the OPB bus clock
 * (OBCI) field derived from opb_bus_freq; FIFO size cases mirror the
 * classic variant above.
 * NOTE(review): switch statements/case labels elided in this extract. */
508 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
510 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
511 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
513 DBG2(dev, "__emac4_calc_base_mr1" NL);
517 ret |= EMAC4_MR1_TFS_16K;
520 ret |= EMAC4_MR1_TFS_4K;
523 ret |= EMAC4_MR1_TFS_2K;
526 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
527 dev->ndev->name, tx_size);
532 ret |= EMAC4_MR1_RFS_16K;
535 ret |= EMAC4_MR1_RFS_4K;
538 ret |= EMAC4_MR1_RFS_2K;
541 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
542 dev->ndev->name, rx_size);
/* Dispatch to the EMAC4 or classic MR1 calculator based on the
 * EMAC_FTR_EMAC4 feature bit. */
548 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
550 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
551 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
552 __emac_calc_base_mr1(dev, tx_size, rx_size);
/* Encode a TX request threshold (in bytes, units of 64) into the TRTR
 * register format; field position differs between EMAC4 and classic. */
555 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
557 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
558 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
560 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
/* Pack RX FIFO low/high water marks into the RWMR register; EMAC4 uses
 * wider 10-bit fields at different bit positions than the classic core. */
563 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
564 unsigned int low, unsigned int high)
566 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
567 return (low << 22) | ( (high & 0x3ff) << 6);
569 return (low << 23) | ( (high & 0x1ff) << 7);
/* Full hardware (re)configuration after reset: program MR1 from link
 * speed/duplex and FIFO sizes, set the station MAC address, VLAN TPID,
 * RX mode, FIFO thresholds, water marks, PAUSE timer and interrupt mask,
 * and take a GPCS PHY out of isolate mode. Returns 0 on success, or an
 * error if the underlying emac_reset() fails.
 * NOTE(review): several lines are missing from this extract (local
 * declarations for mr1/r, some case labels in the speed switch, parts of
 * the no-link branch), so control flow below is not complete as shown. */
572 static int emac_configure(struct emac_instance *dev)
574 struct emac_regs __iomem *p = dev->emacp;
575 struct net_device *ndev = dev->ndev;
576 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
579 DBG(dev, "configure" NL);
582 out_be32(&p->mr1, in_be32(&p->mr1)
583 | EMAC_MR1_FDE | EMAC_MR1_ILE);
585 } else if (emac_reset(dev) < 0)
588 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
589 tah_reset(dev->tah_dev);
591 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
592 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
594 /* Default fifo sizes */
595 tx_size = dev->tx_fifo_size;
596 rx_size = dev->rx_fifo_size;
598 /* No link, force loopback */
600 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
602 /* Check for full duplex */
603 else if (dev->phy.duplex == DUPLEX_FULL)
604 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
606 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
607 dev->stop_timeout = STOP_TIMEOUT_10;
608 switch (dev->phy.speed) {
610 if (emac_phy_gpcs(dev->phy.mode)) {
611 mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
612 (dev->phy.gpcs_address != 0xffffffff) ?
613 dev->phy.gpcs_address : dev->phy.address);
615 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
616 * identify this GPCS PHY later.
618 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
620 mr1 |= EMAC_MR1_MF_1000;
622 /* Extended fifo sizes */
623 tx_size = dev->tx_fifo_size_gige;
624 rx_size = dev->rx_fifo_size_gige;
626 if (dev->ndev->mtu > ETH_DATA_LEN) {
627 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
628 mr1 |= EMAC4_MR1_JPSM;
630 mr1 |= EMAC_MR1_JPSM;
631 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
633 dev->stop_timeout = STOP_TIMEOUT_1000;
636 mr1 |= EMAC_MR1_MF_100;
637 dev->stop_timeout = STOP_TIMEOUT_100;
639 default: /* make gcc happy */
643 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
644 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
646 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
647 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
649 /* on 40x erratum forces us to NOT use integrated flow control,
650 * let's hope it works on 44x ;)
652 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
653 dev->phy.duplex == DUPLEX_FULL) {
655 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
656 else if (dev->phy.asym_pause)
660 /* Add base settings & fifo sizes & program MR1 */
661 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
662 out_be32(&p->mr1, mr1);
664 /* Set individual MAC address */
665 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
666 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
667 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
670 /* VLAN Tag Protocol ID */
671 out_be32(&p->vtpid, 0x8100);
673 /* Receive mode register */
674 r = emac_iff2rmr(ndev);
675 if (r & EMAC_RMR_MAE)
677 out_be32(&p->rmr, r);
679 /* FIFOs thresholds */
680 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
681 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
682 tx_size / 2 / dev->fifo_entry_size);
684 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
685 tx_size / 2 / dev->fifo_entry_size);
686 out_be32(&p->tmr1, r);
687 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
689 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
690 there should be still enough space in FIFO to allow the our link
691 partner time to process this frame and also time to send PAUSE
694 Here is the worst case scenario for the RX FIFO "headroom"
695 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
697 1) One maximum-length frame on TX 1522 bytes
698 2) One PAUSE frame time 64 bytes
699 3) PAUSE frame decode time allowance 64 bytes
700 4) One maximum-length frame on RX 1522 bytes
701 5) Round-trip propagation delay of the link (100Mb) 15 bytes
705 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
706 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
708 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
709 rx_size / 4 / dev->fifo_entry_size);
710 out_be32(&p->rwmr, r);
712 /* Set PAUSE timer to the maximum */
713 out_be32(&p->ptr, 0xffff);
716 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
717 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
718 EMAC_ISR_IRE | EMAC_ISR_TE;
719 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
720 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
722 out_be32(&p->iser, r);
724 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
725 if (emac_phy_gpcs(dev->phy.mode)) {
726 if (dev->phy.gpcs_address != 0xffffffff)
727 emac_mii_reset_gpcs(&dev->phy);
729 emac_mii_reset_phy(&dev->phy);
/* Stop the interface, reprogram the hardware via emac_configure(), and
 * restart the interface on success. */
735 static void emac_reinitialize(struct emac_instance *dev)
737 DBG(dev, "reinitialize" NL);
739 emac_netif_stop(dev);
740 if (!emac_configure(dev)) {
744 emac_netif_start(dev);
/* Heavyweight TX recovery: disable TX, stop the MAL TX channel, drop all
 * queued skbs, reset the ring indices, then (after reconfiguration, lines
 * elided here) re-enable the channel. */
747 static void emac_full_tx_reset(struct emac_instance *dev)
749 DBG(dev, "full_tx_reset" NL);
751 emac_tx_disable(dev);
752 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
753 emac_clean_tx_ring(dev);
754 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
758 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
/* Deferred reset handler scheduled from emac_tx_timeout(): performs the
 * full TX reset in process context under the link mutex. */
763 static void emac_reset_work(struct work_struct *work)
765 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
767 DBG(dev, "reset_work" NL);
769 mutex_lock(&dev->link_lock);
771 emac_netif_stop(dev);
772 emac_full_tx_reset(dev);
773 emac_netif_start(dev);
775 mutex_unlock(&dev->link_lock);
/* ndo_tx_timeout hook: cannot reset from softirq context, so queue the
 * reset_work item instead. */
778 static void emac_tx_timeout(struct net_device *ndev)
780 struct emac_instance *dev = netdev_priv(ndev);
782 DBG(dev, "tx_timeout" NL);
784 schedule_work(&dev->reset_work);
/* Test the STACR "operation complete" bit; on parts with the inverted-OC
 * quirk the sense is flipped (inversion line elided in this extract). */
788 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
790 int done = !!(stacr & EMAC_STACR_OC);
792 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
/* Read one PHY register over MDIO through the STACR register.
 * Serialized by dev->mdio_lock; routes MDIO through the ZMII/RGMII
 * bridge when present. Waits for the interface to go idle, issues the
 * read, waits for completion, and checks STACR[PHYE] for a PHY error.
 * Returns the 16-bit register value, or a negative errno on
 * timeout/PHY error.
 * NOTE(review): poll-loop bodies (udelay/decrement) and some goto/label
 * lines are missing from this extract. */
798 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
800 struct emac_regs __iomem *p = dev->emacp;
802 int n, err = -ETIMEDOUT;
804 mutex_lock(&dev->mdio_lock);
806 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
808 /* Enable proper MDIO port */
809 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
810 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
811 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
812 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
814 /* Wait for management interface to become idle */
816 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
819 DBG2(dev, " -> timeout wait idle\n");
824 /* Issue read command */
825 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
826 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
828 r = EMAC_STACR_BASE(dev->opb_bus_freq);
829 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
831 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
832 r |= EMACX_STACR_STAC_READ;
834 r |= EMAC_STACR_STAC_READ;
835 r |= (reg & EMAC_STACR_PRA_MASK)
836 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
837 out_be32(&p->stacr, r);
839 /* Wait for read to complete */
841 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
844 DBG2(dev, " -> timeout wait complete\n");
849 if (unlikely(r & EMAC_STACR_PHYE)) {
850 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
855 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
857 DBG2(dev, "mdio_read -> %04x" NL, r);
860 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
861 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
862 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
863 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
864 mutex_unlock(&dev->mdio_lock);
866 return err == 0 ? r : err;
/* Write one PHY register over MDIO; mirror image of __emac_mdio_read():
 * claim the MDIO port, wait idle, issue the write with the data packed
 * into STACR[PHYD], wait for completion, release. Errors are only
 * reflected in debug output (the function returns void).
 * NOTE(review): poll-loop bodies and labels elided in this extract. */
869 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
872 struct emac_regs __iomem *p = dev->emacp;
874 int n, err = -ETIMEDOUT;
876 mutex_lock(&dev->mdio_lock);
878 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
880 /* Enable proper MDIO port */
881 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
882 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
883 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
884 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
886 /* Wait for management interface to be idle */
888 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
891 DBG2(dev, " -> timeout wait idle\n");
896 /* Issue write command */
897 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
898 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
900 r = EMAC_STACR_BASE(dev->opb_bus_freq);
901 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
903 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
904 r |= EMACX_STACR_STAC_WRITE;
906 r |= EMAC_STACR_STAC_WRITE;
907 r |= (reg & EMAC_STACR_PRA_MASK) |
908 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
909 (val << EMAC_STACR_PHYD_SHIFT);
910 out_be32(&p->stacr, r);
912 /* Wait for write to complete */
914 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
917 DBG2(dev, " -> timeout wait complete\n");
923 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
924 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
925 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
926 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
927 mutex_unlock(&dev->mdio_lock);
/* mii_if_info read hook: forwards to __emac_mdio_read(), using the
 * shared mdio_instance EMAC unless the target is this EMAC's own GPCS
 * address. */
930 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
932 struct emac_instance *dev = netdev_priv(ndev);
935 res = __emac_mdio_read((dev->mdio_instance &&
936 dev->phy.gpcs_address != id) ?
937 dev->mdio_instance : dev,
/* mii_if_info write hook: same instance-selection rule as
 * emac_mdio_read(), delegating to __emac_mdio_write(). */
942 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
944 struct emac_instance *dev = netdev_priv(ndev);
946 __emac_mdio_write((dev->mdio_instance &&
947 dev->phy.gpcs_address != id) ?
948 dev->mdio_instance : dev,
949 (u8) id, (u8) reg, (u16) val);
/* Apply the current multicast/RX-mode configuration to the hardware:
 * recompute RMR, briefly disable RX (instead of a full reset -- see the
 * MWSW_001 rationale below), rehash the multicast table if hash-match is
 * enabled, and write the new RMR. Clears mcast_pending. */
953 static void __emac_set_multicast_list(struct emac_instance *dev)
955 struct emac_regs __iomem *p = dev->emacp;
956 u32 rmr = emac_iff2rmr(dev->ndev);
958 DBG(dev, "__multicast %08x" NL, rmr);
960 /* I decided to relax register access rules here to avoid
963 * There is a real problem with EMAC4 core if we use MWSW_001 bit
964 * in MR1 register and do a full EMAC reset.
965 * One TX BD status update is delayed and, after EMAC reset, it
966 * never happens, resulting in TX hung (it'll be recovered by TX
967 * timeout handler eventually, but this is just gross).
968 * So we either have to do full TX reset or try to cheat here :)
970 * The only required change is to RX mode register, so I *think* all
971 * we need is just to stop RX channel. This seems to work on all
974 * If we need the full reset, we might just trigger the workqueue
975 * and do it async... a bit nasty but should work --BenH
977 dev->mcast_pending = 0;
978 emac_rx_disable(dev);
979 if (rmr & EMAC_RMR_MAE)
981 out_be32(&p->rmr, rmr);
/* ndo_set_rx_mode hook. If the link is down the update is deferred by
 * setting mcast_pending (flushed by emac_netif_start()); otherwise the
 * hardware is reprogrammed immediately under the link mutex.
 * NOTE(review): the link-state test between the two branches is missing
 * from this extract. */
988 static void emac_set_multicast_list(struct net_device *ndev)
990 struct emac_instance *dev = netdev_priv(ndev);
992 DBG(dev, "multicast" NL);
995 BUG_ON(!netif_running(dev->ndev));
998 dev->mcast_pending = 1;
1002 mutex_lock(&dev->link_lock);
1003 __emac_set_multicast_list(dev);
1004 mutex_unlock(&dev->link_lock);
/* ndo_set_mac_address hook: validate the new address, then under the
 * link mutex pause RX/TX, write the address into IAHR/IALR (high 2 bytes
 * / low 4 bytes), and resume. Returns -EADDRNOTAVAIL for an invalid
 * Ethernet address. */
1004 static int emac_set_mac_address(struct net_device *ndev, void *sa)
1006 struct emac_instance *dev = netdev_priv(ndev);
1007 struct sockaddr *addr = sa;
1008 struct emac_regs __iomem *p = dev->emacp;
1010 if (!is_valid_ether_addr(addr->sa_data))
1011 return -EADDRNOTAVAIL;
1013 mutex_lock(&dev->link_lock);
1015 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1017 emac_rx_disable(dev);
1018 emac_tx_disable(dev);
1019 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
1020 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
1021 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
1023 emac_tx_enable(dev);
1024 emac_rx_enable(dev);
1026 mutex_unlock(&dev->link_lock);
/* Rebuild the RX ring for a new MTU while the interface stays down only
 * briefly: stop RX, drop any partial scatter-gather skb, mark all BDs
 * empty (dropping unprocessed frames), reallocate larger skbs only when
 * the new size requires it, toggle the MR1 jumbo bit via a full TX reset
 * when crossing the ETH_DATA_LEN boundary, then restart everything.
 * NOTE(review): error-path labels and the allocation-failure branch are
 * missing from this extract. */
1031 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
1033 int rx_sync_size = emac_rx_sync_size(new_mtu);
1034 int rx_skb_size = emac_rx_skb_size(new_mtu);
1036 int mr1_jumbo_bit_change = 0;
1038 mutex_lock(&dev->link_lock);
1039 emac_netif_stop(dev);
1040 emac_rx_disable(dev);
1041 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1043 if (dev->rx_sg_skb) {
1044 ++dev->estats.rx_dropped_resize;
1045 dev_kfree_skb(dev->rx_sg_skb);
1046 dev->rx_sg_skb = NULL;
1049 /* Make a first pass over RX ring and mark BDs ready, dropping
1050 * non-processed packets on the way. We need this as a separate pass
1051 * to simplify error recovery in the case of allocation failure later.
1053 for (i = 0; i < NUM_RX_BUFF; ++i) {
1054 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
1055 ++dev->estats.rx_dropped_resize;
1057 dev->rx_desc[i].data_len = 0;
1058 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
1059 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1062 /* Reallocate RX ring only if bigger skb buffers are required */
1063 if (rx_skb_size <= dev->rx_skb_size)
1066 /* Second pass, allocate new skbs */
1067 for (i = 0; i < NUM_RX_BUFF; ++i) {
1068 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
1074 BUG_ON(!dev->rx_skb[i]);
1075 dev_kfree_skb(dev->rx_skb[i]);
/* The +2/-2 offsets keep the IP header 16-byte aligned after the
 * 14-byte Ethernet header (see also emac_alloc_rx_skb()). */
1077 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1078 dev->rx_desc[i].data_ptr =
1079 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
1080 DMA_FROM_DEVICE) + 2;
1081 dev->rx_skb[i] = skb;
1084 /* Check if we need to change "Jumbo" bit in MR1 */
1085 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
1086 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
1087 (dev->ndev->mtu > ETH_DATA_LEN);
1089 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
1090 (dev->ndev->mtu > ETH_DATA_LEN);
1093 if (mr1_jumbo_bit_change) {
1094 /* This is to prevent starting RX channel in emac_rx_enable() */
1095 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1097 dev->ndev->mtu = new_mtu;
1098 emac_full_tx_reset(dev);
1101 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1104 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1106 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1107 emac_rx_enable(dev);
1108 emac_netif_start(dev);
1109 mutex_unlock(&dev->link_lock);
1114 /* Process ctx, rtnl_lock semaphore */
/* ndo_change_mtu hook: range-check the new MTU, resize the RX ring only
 * if the required skb size actually changes while the device is running,
 * then record the new MTU and derived skb/sync sizes. */
1115 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1117 struct emac_instance *dev = netdev_priv(ndev);
1120 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1123 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1125 if (netif_running(ndev)) {
1126 /* Check if we really need to reinitialize RX ring */
1127 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1128 ret = emac_resize_rx_ring(dev, new_mtu);
1132 ndev->mtu = new_mtu;
1133 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1134 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
/* Free every queued TX skb and zero the descriptors; BDs that were still
 * marked READY count as dropped frames in the error stats. */
1140 static void emac_clean_tx_ring(struct emac_instance *dev)
1144 for (i = 0; i < NUM_TX_BUFF; ++i) {
1145 if (dev->tx_skb[i]) {
1146 dev_kfree_skb(dev->tx_skb[i]);
1147 dev->tx_skb[i] = NULL;
1148 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1149 ++dev->estats.tx_dropped;
1151 dev->tx_desc[i].ctrl = 0;
1152 dev->tx_desc[i].data_ptr = 0;
/* Free all RX ring skbs and clear their descriptors, including any
 * in-progress scatter-gather skb. */
1156 static void emac_clean_rx_ring(struct emac_instance *dev)
1160 for (i = 0; i < NUM_RX_BUFF; ++i)
1161 if (dev->rx_skb[i]) {
1162 dev->rx_desc[i].ctrl = 0;
1163 dev_kfree_skb(dev->rx_skb[i]);
1164 dev->rx_skb[i] = NULL;
1165 dev->rx_desc[i].data_ptr = 0;
1168 if (dev->rx_sg_skb) {
1169 dev_kfree_skb(dev->rx_sg_skb);
1170 dev->rx_sg_skb = NULL;
/* Allocate and DMA-map one RX skb into ring slot `slot`, then mark the
 * BD empty (with WRAP on the last slot). The +2/-2 offsets align the IP
 * header to 16 bytes past the 14-byte Ethernet header. Returns 0 on
 * success (allocation-failure return elided in this extract). */
1174 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1177 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1181 dev->rx_skb[slot] = skb;
1182 dev->rx_desc[slot].data_len = 0;
1184 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1185 dev->rx_desc[slot].data_ptr =
1186 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1187 DMA_FROM_DEVICE) + 2;
1189 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1190 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Log the current link state: speed, duplex and pause configuration when
 * the carrier is up, or a simple "link is down" otherwise. */
1195 static void emac_print_link_status(struct emac_instance *dev)
1197 if (netif_carrier_ok(dev->ndev))
1198 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1199 dev->ndev->name, dev->phy.speed,
1200 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1201 dev->phy.pause ? ", pause enabled" :
1202 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1204 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1207 /* Process ctx, rtnl_lock semaphore */
/* ndo_open hook: request the error IRQ, populate the RX ring, reset the
 * ring indices, start PHY link polling (or assume carrier when there is
 * no PHY to poll), subscribe the PAUSE multicast address, configure the
 * hardware and bring up the MAL channels and queues. On RX allocation
 * failure the ring and IRQ are released again (error labels elided in
 * this extract). */
1208 static int emac_open(struct net_device *ndev)
1210 struct emac_instance *dev = netdev_priv(ndev);
1213 DBG(dev, "open" NL);
1215 /* Setup error IRQ handler */
1216 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1218 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1219 ndev->name, dev->emac_irq);
1223 /* Allocate RX ring */
1224 for (i = 0; i < NUM_RX_BUFF; ++i)
1225 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1226 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1231 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1232 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1233 dev->rx_sg_skb = NULL;
1235 mutex_lock(&dev->link_lock);
1238 /* Start PHY polling now.
1240 if (dev->phy.address >= 0) {
1241 int link_poll_interval;
1242 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1243 dev->phy.def->ops->read_link(&dev->phy);
1244 emac_rx_clk_default(dev);
1245 netif_carrier_on(dev->ndev);
1246 link_poll_interval = PHY_POLL_LINK_ON;
1248 emac_rx_clk_tx(dev);
1249 netif_carrier_off(dev->ndev);
1250 link_poll_interval = PHY_POLL_LINK_OFF;
1252 dev->link_polling = 1;
1254 schedule_delayed_work(&dev->link_work, link_poll_interval);
1255 emac_print_link_status(dev);
1257 netif_carrier_on(dev->ndev);
1259 /* Required for Pause packet support in EMAC */
1260 dev_mc_add_global(ndev, default_mcast_addr);
1262 emac_configure(dev);
1263 mal_poll_add(dev->mal, &dev->commac);
1264 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1265 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1266 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1267 emac_tx_enable(dev);
1268 emac_rx_enable(dev);
1269 emac_netif_start(dev);
1271 mutex_unlock(&dev->link_lock);
1275 emac_clean_rx_ring(dev);
1276 free_irq(dev->emac_irq, dev);
/* Decode the current MAC configuration from the MR1 register
 * (duplex, speed, pause bits) and return non-zero if it no longer
 * matches what the PHY negotiated — i.e. the MAC needs reconfiguring.
 * NOTE(review): the speed decode branches and the pause switch cases
 * are partially elided in this extract. */
1283 static int emac_link_differs(struct emac_instance *dev)
1285 u32 r = in_be32(&dev->emacp->mr1);
1287 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1288 int speed, pause, asym_pause;
1290 if (r & EMAC_MR1_MF_1000)
1292 else if (r & EMAC_MR1_MF_100)
1297 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1298 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1307 pause = asym_pause = 0;
1309 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1310 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
/* Delayed-work handler that polls PHY link state under link_lock.
 * On a down->up transition it re-reads link parameters and performs a
 * full TX reset (stop netif, reset, restart) so the MAC picks up the
 * new speed/duplex; on up->down it disables TX and reinitializes.
 * Always reschedules itself, with a shorter interval while link is up. */
1314 static void emac_link_timer(struct work_struct *work)
1316 struct emac_instance *dev =
1317 container_of(to_delayed_work(work),
1318 struct emac_instance, link_work);
1319 int link_poll_interval;
1321 mutex_lock(&dev->link_lock);
1322 DBG2(dev, "link timer" NL);
1327 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1328 if (!netif_carrier_ok(dev->ndev)) {
1329 emac_rx_clk_default(dev);
1330 /* Get new link parameters */
1331 dev->phy.def->ops->read_link(&dev->phy);
1333 netif_carrier_on(dev->ndev);
/* Full reset so MR1 etc. match the freshly negotiated link */
1334 emac_netif_stop(dev);
1335 emac_full_tx_reset(dev);
1336 emac_netif_start(dev);
1337 emac_print_link_status(dev);
1339 link_poll_interval = PHY_POLL_LINK_ON;
1341 if (netif_carrier_ok(dev->ndev)) {
1342 emac_rx_clk_tx(dev);
1343 netif_carrier_off(dev->ndev);
1344 netif_tx_disable(dev->ndev);
1345 emac_reinitialize(dev);
1346 emac_print_link_status(dev);
1348 link_poll_interval = PHY_POLL_LINK_OFF;
1350 schedule_delayed_work(&dev->link_work, link_poll_interval);
1352 mutex_unlock(&dev->link_lock);
/* Force an immediate link re-evaluation: drop carrier and, if polling
 * is active, cancel the pending work and reschedule it with the
 * link-down interval.  The inner re-check of link_polling guards
 * against a concurrent emac_close() clearing the flag. */
1355 static void emac_force_link_update(struct emac_instance *dev)
1357 netif_carrier_off(dev->ndev);
1359 if (dev->link_polling) {
1360 cancel_delayed_work_sync(&dev->link_work);
1361 if (dev->link_polling)
1362 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1366 /* Process ctx, rtnl_lock semaphore */
/* ndo_stop handler: tear down in reverse order of emac_open().
 * Stop link polling first (so the work can't restart anything), then
 * under link_lock stop the netif, disable EMAC RX/TX and the MAL
 * channels, detach from MAL polling, free both rings, release the
 * IRQ and drop carrier. */
1367 static int emac_close(struct net_device *ndev)
1369 struct emac_instance *dev = netdev_priv(ndev);
1371 DBG(dev, "close" NL);
1373 if (dev->phy.address >= 0) {
1374 dev->link_polling = 0;
1375 cancel_delayed_work_sync(&dev->link_work);
1377 mutex_lock(&dev->link_lock);
1378 emac_netif_stop(dev);
1380 mutex_unlock(&dev->link_lock);
1382 emac_rx_disable(dev);
1383 emac_tx_disable(dev);
1384 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1385 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1386 mal_poll_del(dev->mal, &dev->commac);
1388 emac_clean_tx_ring(dev);
1389 emac_clean_rx_ring(dev);
1391 free_irq(dev->emac_irq, dev);
1393 netif_carrier_off(ndev);
/* Return the TX descriptor control bit requesting TAH hardware
 * checksumming for this skb, or 0 (fallthrough, elided here) when the
 * EMAC has no TAH or the stack did not ask for partial checksum. */
1398 static inline u16 emac_tx_csum(struct emac_instance *dev,
1399 struct sk_buff *skb)
1401 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1402 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1403 ++dev->stats.tx_packets_csum;
1404 return EMAC_TX_CTRL_TAH_CSUM;
/* Common tail of the xmit paths: kick the transmitter via TMR0 (the
 * register value differs between EMAC4 and classic EMAC), stop the
 * queue if the TX ring just became full, and account the packet.
 * Always returns NETDEV_TX_OK — the ring-full check happens before
 * the next xmit, not here. */
1409 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1411 struct emac_regs __iomem *p = dev->emacp;
1412 struct net_device *ndev = dev->ndev;
1414 /* Send the packet out. If the if makes a significant perf
1415 * difference, then we can store the TMR0 value in "dev"
1418 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1419 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1421 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1423 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1424 netif_stop_queue(ndev);
1425 DBG2(dev, "stopped TX queue" NL);
1428 netif_trans_update(ndev);
1429 ++dev->stats.tx_packets;
1430 dev->stats.tx_bytes += len;
1432 return NETDEV_TX_OK;
/* ndo_start_xmit for non-TAH EMACs (single-BD path): claim the next
 * TX slot, DMA-map the linear skb data, fill in the descriptor (WRAP
 * on the last ring slot) and hand off to emac_xmit_finish().
 * The ctrl write at 1459 is what passes BD ownership to the MAL, so
 * it must come after data_ptr/data_len are set. */
1436 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1438 struct emac_instance *dev = netdev_priv(ndev);
1439 unsigned int len = skb->len;
1442 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1443 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1445 slot = dev->tx_slot++;
1446 if (dev->tx_slot == NUM_TX_BUFF) {
1448 ctrl |= MAL_TX_CTRL_WRAP;
1451 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1453 dev->tx_skb[slot] = skb;
1454 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1457 dev->tx_desc[slot].data_len = (u16) len;
1459 dev->tx_desc[slot].ctrl = ctrl;
1461 return emac_xmit_finish(dev, len);
/* Split one DMA region (@pd, @len) into MAL_MAX_TX_SIZE-sized chunks,
 * filling consecutive TX descriptors starting after @slot.  @last
 * tells whether this region ends the packet (so the final chunk gets
 * MAL_TX_CTRL_LAST).  Returns the last slot used (loop structure and
 * return are partially elided in this extract).  tx_skb is NULLed for
 * intermediate slots — only the final slot of a packet holds the skb. */
1464 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1465 u32 pd, int len, int last, u16 base_ctrl)
1468 u16 ctrl = base_ctrl;
1469 int chunk = min(len, MAL_MAX_TX_SIZE);
1472 slot = (slot + 1) % NUM_TX_BUFF;
1475 ctrl |= MAL_TX_CTRL_LAST;
1476 if (slot == NUM_TX_BUFF - 1)
1477 ctrl |= MAL_TX_CTRL_WRAP;
1479 dev->tx_skb[slot] = NULL;
1480 dev->tx_desc[slot].data_ptr = pd;
1481 dev->tx_desc[slot].data_len = (u16) chunk;
1482 dev->tx_desc[slot].ctrl = ctrl;
1493 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Scatter-gather ndo_start_xmit: small linear skbs take the fast
 * single-BD path; otherwise the linear part and each frag are mapped
 * and split across BDs via emac_xmit_split().  The skb is attached to
 * the LAST slot so it isn't freed before all its BDs complete, and
 * the first BD's ctrl (which carries MAL_TX_CTRL_READY) is written
 * last, atomically publishing the whole chain to the MAL.
 * The slot-count check at 1513 is only an estimate; the undo path at
 * the bottom rolls back descriptors and stops the queue when the ring
 * really runs out mid-packet. */
1494 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1496 struct emac_instance *dev = netdev_priv(ndev);
1497 int nr_frags = skb_shinfo(skb)->nr_frags;
1498 int len = skb->len, chunk;
1503 /* This is common "fast" path */
1504 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1505 return emac_start_xmit(skb, ndev);
1507 len -= skb->data_len;
1509 /* Note, this is only an *estimation*, we can still run out of empty
1510 * slots because of the additional fragmentation into
1511 * MAL_MAX_TX_SIZE-sized chunks
1513 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1516 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1517 emac_tx_csum(dev, skb);
1518 slot = dev->tx_slot;
/* Map the linear part of the skb */
1521 dev->tx_skb[slot] = NULL;
1522 chunk = min(len, MAL_MAX_TX_SIZE);
1523 dev->tx_desc[slot].data_ptr = pd =
1524 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1525 dev->tx_desc[slot].data_len = (u16) chunk;
1528 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1531 for (i = 0; i < nr_frags; ++i) {
1532 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1533 len = skb_frag_size(frag);
1535 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1538 pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
1541 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1545 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1547 /* Attach skb to the last slot so we don't release it too early */
1548 dev->tx_skb[slot] = skb;
1550 /* Send the packet out */
1551 if (dev->tx_slot == NUM_TX_BUFF - 1)
1552 ctrl |= MAL_TX_CTRL_WRAP;
1554 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1555 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1557 return emac_xmit_finish(dev, skb->len);
1560 /* Well, too bad. Our previous estimation was overly optimistic.
/* Undo: clear already-filled descriptors back to tx_slot */
1563 while (slot != dev->tx_slot) {
1564 dev->tx_desc[slot].ctrl = 0;
1567 slot = NUM_TX_BUFF - 1;
1569 ++dev->estats.tx_undo;
1572 netif_stop_queue(ndev);
1573 DBG2(dev, "stopped TX queue" NL);
1574 return NETDEV_TX_BUSY;
/* Translate per-BD TX status bits into the driver's extended error
 * counters.  Each EMAC_TX_ST_* flag maps 1:1 to an estats field. */
1578 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1580 struct emac_error_stats *st = &dev->estats;
1582 DBG(dev, "BD TX error %04x" NL, ctrl);
1585 if (ctrl & EMAC_TX_ST_BFCS)
1586 ++st->tx_bd_bad_fcs;
1587 if (ctrl & EMAC_TX_ST_LCS)
1588 ++st->tx_bd_carrier_loss;
1589 if (ctrl & EMAC_TX_ST_ED)
1590 ++st->tx_bd_excessive_deferral;
1591 if (ctrl & EMAC_TX_ST_EC)
1592 ++st->tx_bd_excessive_collisions;
1593 if (ctrl & EMAC_TX_ST_LC)
1594 ++st->tx_bd_late_collision;
1595 if (ctrl & EMAC_TX_ST_MC)
1596 ++st->tx_bd_multple_collisions;
1597 if (ctrl & EMAC_TX_ST_SC)
1598 ++st->tx_bd_single_collision;
1599 if (ctrl & EMAC_TX_ST_UR)
1600 ++st->tx_bd_underrun;
1601 if (ctrl & EMAC_TX_ST_SQE)
/* MAL TX-completion callback: walk the ring from ack_slot, freeing
 * skbs whose descriptors the hardware has released (READY cleared)
 * and recording any per-BD errors.  The bad-bit mask differs when a
 * TAH is present.  Runs under netif_tx_lock_bh to exclude the xmit
 * path; wakes the queue once the ring drains below the threshold. */
1605 static void emac_poll_tx(void *param)
1607 struct emac_instance *dev = param;
1610 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1612 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1613 bad_mask = EMAC_IS_BAD_TX_TAH;
1615 bad_mask = EMAC_IS_BAD_TX;
1617 netif_tx_lock_bh(dev->ndev);
1620 int slot = dev->ack_slot, n = 0;
1622 ctrl = dev->tx_desc[slot].ctrl;
1623 if (!(ctrl & MAL_TX_CTRL_READY)) {
1624 struct sk_buff *skb = dev->tx_skb[slot];
1629 dev->tx_skb[slot] = NULL;
1631 slot = (slot + 1) % NUM_TX_BUFF;
1633 if (unlikely(ctrl & bad_mask))
1634 emac_parse_tx_error(dev, ctrl);
1640 dev->ack_slot = slot;
1641 if (netif_queue_stopped(dev->ndev) &&
1642 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1643 netif_wake_queue(dev->ndev);
1645 DBG2(dev, "tx %d pkts" NL, n);
1648 netif_tx_unlock_bh(dev->ndev);
/* Give the existing RX skb in @slot back to the hardware without
 * reallocating: re-map the used region for DMA (same -2 IP-alignment
 * offset as emac_alloc_rx_skb) and mark the descriptor EMPTY again,
 * preserving WRAP on the last ring slot. */
1651 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1654 struct sk_buff *skb = dev->rx_skb[slot];
1656 DBG2(dev, "recycle %d %d" NL, slot, len);
1659 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1660 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1662 dev->rx_desc[slot].data_len = 0;
1664 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1665 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Translate per-BD RX status bits into the driver's extended error
 * counters.  Each EMAC_RX_ST_* flag maps 1:1 to an estats field. */
1668 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1670 struct emac_error_stats *st = &dev->estats;
1672 DBG(dev, "BD RX error %04x" NL, ctrl);
1675 if (ctrl & EMAC_RX_ST_OE)
1676 ++st->rx_bd_overrun;
1677 if (ctrl & EMAC_RX_ST_BP)
1678 ++st->rx_bd_bad_packet;
1679 if (ctrl & EMAC_RX_ST_RP)
1680 ++st->rx_bd_runt_packet;
1681 if (ctrl & EMAC_RX_ST_SE)
1682 ++st->rx_bd_short_event;
1683 if (ctrl & EMAC_RX_ST_AE)
1684 ++st->rx_bd_alignment_error;
1685 if (ctrl & EMAC_RX_ST_BFCS)
1686 ++st->rx_bd_bad_fcs;
1687 if (ctrl & EMAC_RX_ST_PTL)
1688 ++st->rx_bd_packet_too_long;
1689 if (ctrl & EMAC_RX_ST_ORE)
1690 ++st->rx_bd_out_of_range;
1691 if (ctrl & EMAC_RX_ST_IRE)
1692 ++st->rx_bd_in_range;
/* Mark the skb's checksum as hardware-verified when a TAH is present
 * and the BD reported no checksum-related status bits (ctrl == 0).
 * Compiled out entirely without CONFIG_IBM_EMAC_TAH. */
1695 static inline void emac_rx_csum(struct emac_instance *dev,
1696 struct sk_buff *skb, u16 ctrl)
1698 #ifdef CONFIG_IBM_EMAC_TAH
1699 if (!ctrl && dev->tah_dev) {
1700 skb->ip_summed = CHECKSUM_UNNECESSARY;
1701 ++dev->stats.rx_packets_csum;
/* Append the data of RX slot @slot to the in-progress scatter-gather
 * skb (rx_sg_skb).  If the combined length would exceed rx_skb_size
 * the whole partial packet is dropped (counted as rx_dropped_mtu).
 * The slot's skb is always recycled back to the ring.  Return value
 * semantics: 0 appears to mean success — TODO confirm, the returns
 * are elided in this extract. */
1706 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1708 if (likely(dev->rx_sg_skb != NULL)) {
1709 int len = dev->rx_desc[slot].data_len;
1710 int tot_len = dev->rx_sg_skb->len + len;
1712 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1713 ++dev->estats.rx_dropped_mtu;
1714 dev_kfree_skb(dev->rx_sg_skb);
1715 dev->rx_sg_skb = NULL;
1717 memcpy(skb_tail_pointer(dev->rx_sg_skb),
1718 dev->rx_skb[slot]->data, len);
1719 skb_put(dev->rx_sg_skb, len);
1720 emac_recycle_rx_skb(dev, slot, len);
1724 emac_recycle_rx_skb(dev, slot, 0);
1728 /* NAPI poll context */
/* Main RX processing loop, driven by MAL NAPI.  For each non-EMPTY
 * descriptor starting at rx_slot:
 *  - single-BD packets: drop on bad status or runt length; copy small
 *    packets (< EMAC_RX_COPY_THRESH) into a fresh skb and recycle the
 *    ring skb, otherwise hand the ring skb up and allocate a new one;
 *  - multi-BD packets (scatter-gather): start rx_sg_skb on FIRST,
 *    append middle fragments, finalize on LAST.
 * Returns the number of packets received (budget accounting and some
 * error-path lines are elided in this extract).  The tail handles an
 * RX-stopped condition: drop any partial SG packet and re-enable the
 * MAL RX channel + EMAC receiver. */
1729 static int emac_poll_rx(void *param, int budget)
1731 struct emac_instance *dev = param;
1732 int slot = dev->rx_slot, received = 0;
1734 DBG2(dev, "poll_rx(%d)" NL, budget);
1737 while (budget > 0) {
1739 struct sk_buff *skb;
1740 u16 ctrl = dev->rx_desc[slot].ctrl;
1742 if (ctrl & MAL_RX_CTRL_EMPTY)
1745 skb = dev->rx_skb[slot];
1747 len = dev->rx_desc[slot].data_len;
1749 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1752 ctrl &= EMAC_BAD_RX_MASK;
1753 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1754 emac_parse_rx_error(dev, ctrl);
1755 ++dev->estats.rx_dropped_error;
1756 emac_recycle_rx_skb(dev, slot, 0);
/* Runt frame: shorter than an Ethernet header */
1761 if (len < ETH_HLEN) {
1762 ++dev->estats.rx_dropped_stack;
1763 emac_recycle_rx_skb(dev, slot, len);
/* Small packet: copy out and keep the ring skb */
1767 if (len && len < EMAC_RX_COPY_THRESH) {
1768 struct sk_buff *copy_skb =
1769 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1770 if (unlikely(!copy_skb))
1773 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1774 memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
1775 emac_recycle_rx_skb(dev, slot, len);
1777 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1782 skb->protocol = eth_type_trans(skb, dev->ndev);
1783 emac_rx_csum(dev, skb, ctrl);
1785 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1786 ++dev->estats.rx_dropped_stack;
1788 ++dev->stats.rx_packets;
1790 dev->stats.rx_bytes += len;
1791 slot = (slot + 1) % NUM_RX_BUFF;
/* Multi-descriptor (scatter-gather) packet handling */
1796 if (ctrl & MAL_RX_CTRL_FIRST) {
1797 BUG_ON(dev->rx_sg_skb);
1798 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1799 DBG(dev, "rx OOM %d" NL, slot);
1800 ++dev->estats.rx_dropped_oom;
1801 emac_recycle_rx_skb(dev, slot, 0);
1803 dev->rx_sg_skb = skb;
1806 } else if (!emac_rx_sg_append(dev, slot) &&
1807 (ctrl & MAL_RX_CTRL_LAST)) {
1809 skb = dev->rx_sg_skb;
1810 dev->rx_sg_skb = NULL;
1812 ctrl &= EMAC_BAD_RX_MASK;
1813 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1814 emac_parse_rx_error(dev, ctrl);
1815 ++dev->estats.rx_dropped_error;
1823 DBG(dev, "rx OOM %d" NL, slot);
1824 /* Drop the packet and recycle skb */
1825 ++dev->estats.rx_dropped_oom;
1826 emac_recycle_rx_skb(dev, slot, 0);
1831 DBG2(dev, "rx %d BDs" NL, received);
1832 dev->rx_slot = slot;
/* RX engine was stopped (e.g. descriptor error): restart it */
1835 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1837 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1838 DBG2(dev, "rx restart" NL);
1843 if (dev->rx_sg_skb) {
1844 DBG2(dev, "dropping partial rx packet" NL);
1845 ++dev->estats.rx_dropped_error;
1846 dev_kfree_skb(dev->rx_sg_skb);
1847 dev->rx_sg_skb = NULL;
1850 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1851 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1852 emac_rx_enable(dev);
1858 /* NAPI poll context */
/* MAL peek_rx callback (single-BD mode): non-zero when the current
 * RX descriptor holds a completed packet. */
1859 static int emac_peek_rx(void *param)
1861 struct emac_instance *dev = param;
1863 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1866 /* NAPI poll context */
/* MAL peek_rx callback (scatter-gather mode): scan forward from
 * rx_slot looking for a descriptor carrying MAL_RX_CTRL_LAST, i.e. a
 * fully received multi-BD packet; stop at the first EMPTY descriptor.
 * The full-circle check guards against an infinite scan. */
1867 static int emac_peek_rx_sg(void *param)
1869 struct emac_instance *dev = param;
1871 int slot = dev->rx_slot;
1873 u16 ctrl = dev->rx_desc[slot].ctrl;
1874 if (ctrl & MAL_RX_CTRL_EMPTY)
1876 else if (ctrl & MAL_RX_CTRL_LAST)
1879 slot = (slot + 1) % NUM_RX_BUFF;
1881 /* I'm just being paranoid here :) */
1882 if (unlikely(slot == dev->rx_slot))
/* MAL RX-descriptor-error callback: count the stall and disable the
 * receiver asynchronously; emac_poll_rx() restarts it later. */
1888 static void emac_rxde(void *param)
1890 struct emac_instance *dev = param;
1892 ++dev->estats.rx_stopped;
1893 emac_rx_disable_async(dev);
/* EMAC error interrupt handler: read and acknowledge ISR (write-back
 * clears the bits), then bump the matching error counters.  Runs
 * under dev->lock to serialize with the stats reader.  Some counter
 * increments and the return are elided in this extract. */
1897 static irqreturn_t emac_irq(int irq, void *dev_instance)
1899 struct emac_instance *dev = dev_instance;
1900 struct emac_regs __iomem *p = dev->emacp;
1901 struct emac_error_stats *st = &dev->estats;
1904 spin_lock(&dev->lock);
1906 isr = in_be32(&p->isr);
/* Writing the value back acknowledges/clears the asserted bits */
1907 out_be32(&p->isr, isr);
1909 DBG(dev, "isr = %08x" NL, isr);
1911 if (isr & EMAC4_ISR_TXPE)
1913 if (isr & EMAC4_ISR_RXPE)
1915 if (isr & EMAC4_ISR_TXUE)
1917 if (isr & EMAC4_ISR_RXOE)
1918 ++st->rx_fifo_overrun;
1919 if (isr & EMAC_ISR_OVR)
1921 if (isr & EMAC_ISR_BP)
1922 ++st->rx_bad_packet;
1923 if (isr & EMAC_ISR_RP)
1924 ++st->rx_runt_packet;
1925 if (isr & EMAC_ISR_SE)
1926 ++st->rx_short_event;
1927 if (isr & EMAC_ISR_ALE)
1928 ++st->rx_alignment_error;
1929 if (isr & EMAC_ISR_BFCS)
1931 if (isr & EMAC_ISR_PTLE)
1932 ++st->rx_packet_too_long;
1933 if (isr & EMAC_ISR_ORE)
1934 ++st->rx_out_of_range;
1935 if (isr & EMAC_ISR_IRE)
1937 if (isr & EMAC_ISR_SQE)
1939 if (isr & EMAC_ISR_TE)
1942 spin_unlock(&dev->lock);
/* ndo_get_stats handler: fold the driver's detailed 64-bit stats
 * (dev->stats) and extended error stats (dev->estats) into the
 * "legacy" net_device_stats snapshot in dev->nstats.  Taken under
 * dev->lock with IRQs off so it is consistent against emac_irq(). */
1947 static struct net_device_stats *emac_stats(struct net_device *ndev)
1949 struct emac_instance *dev = netdev_priv(ndev);
1950 struct emac_stats *st = &dev->stats;
1951 struct emac_error_stats *est = &dev->estats;
1952 struct net_device_stats *nst = &dev->nstats;
1953 unsigned long flags;
1955 DBG2(dev, "stats" NL);
1957 /* Compute "legacy" statistics */
1958 spin_lock_irqsave(&dev->lock, flags);
1959 nst->rx_packets = (unsigned long)st->rx_packets;
1960 nst->rx_bytes = (unsigned long)st->rx_bytes;
1961 nst->tx_packets = (unsigned long)st->tx_packets;
1962 nst->tx_bytes = (unsigned long)st->tx_bytes;
1963 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1964 est->rx_dropped_error +
1965 est->rx_dropped_resize +
1966 est->rx_dropped_mtu);
1967 nst->tx_dropped = (unsigned long)est->tx_dropped;
1969 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1970 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1971 est->rx_fifo_overrun +
1973 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1974 est->rx_alignment_error);
1975 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1977 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1978 est->rx_bd_short_event +
1979 est->rx_bd_packet_too_long +
1980 est->rx_bd_out_of_range +
1981 est->rx_bd_in_range +
1982 est->rx_runt_packet +
1983 est->rx_short_event +
1984 est->rx_packet_too_long +
1985 est->rx_out_of_range +
1988 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1989 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1991 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1992 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1993 est->tx_bd_excessive_collisions +
1994 est->tx_bd_late_collision +
1995 est->tx_bd_multple_collisions);
1996 spin_unlock_irqrestore(&dev->lock, flags);
/* MAL callbacks for the plain (single-BD RX) configuration. */
2000 static struct mal_commac_ops emac_commac_ops = {
2001 .poll_tx = &emac_poll_tx,
2002 .poll_rx = &emac_poll_rx,
2003 .peek_rx = &emac_peek_rx,
/* MAL callbacks for the scatter-gather RX configuration — only
 * peek_rx differs from emac_commac_ops. */
2007 static struct mal_commac_ops emac_commac_sg_ops = {
2008 .poll_tx = &emac_poll_tx,
2009 .poll_rx = &emac_poll_rx,
2010 .peek_rx = &emac_peek_rx_sg,
2014 /* Ethtool support */
/* Legacy ethtool get_settings: report PHY capabilities and, under
 * link_lock, the current advertising/autoneg/speed/duplex state.
 * transceiver is XCVR_EXTERNAL for a real PHY, XCVR_INTERNAL for the
 * PHY-less configuration (address < 0). */
2015 static int emac_ethtool_get_settings(struct net_device *ndev,
2016 struct ethtool_cmd *cmd)
2018 struct emac_instance *dev = netdev_priv(ndev);
2020 cmd->supported = dev->phy.features;
2021 cmd->port = PORT_MII;
2022 cmd->phy_address = dev->phy.address;
2024 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
2026 mutex_lock(&dev->link_lock);
2027 cmd->advertising = dev->phy.advertising;
2028 cmd->autoneg = dev->phy.autoneg;
2029 cmd->speed = dev->phy.speed;
2030 cmd->duplex = dev->phy.duplex;
2031 mutex_unlock(&dev->link_lock);
/* Legacy ethtool set_settings: validate the request against the PHY's
 * feature mask, then either force speed/duplex (autoneg off) or
 * restart autonegotiation with the requested advertisement (keeping
 * the current pause-advertisement bits), and finally force a link
 * re-poll.  -ENXIO if there is no PHY; invalid combinations return
 * an error (elided returns in this extract). */
2036 static int emac_ethtool_set_settings(struct net_device *ndev,
2037 struct ethtool_cmd *cmd)
2039 struct emac_instance *dev = netdev_priv(ndev);
2040 u32 f = dev->phy.features;
2042 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2043 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
2045 /* Basic sanity checks */
2046 if (dev->phy.address < 0)
2048 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
2050 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
2052 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
2055 if (cmd->autoneg == AUTONEG_DISABLE) {
2056 switch (cmd->speed) {
2058 if (cmd->duplex == DUPLEX_HALF &&
2059 !(f & SUPPORTED_10baseT_Half))
2061 if (cmd->duplex == DUPLEX_FULL &&
2062 !(f & SUPPORTED_10baseT_Full))
2066 if (cmd->duplex == DUPLEX_HALF &&
2067 !(f & SUPPORTED_100baseT_Half))
2069 if (cmd->duplex == DUPLEX_FULL &&
2070 !(f & SUPPORTED_100baseT_Full))
2074 if (cmd->duplex == DUPLEX_HALF &&
2075 !(f & SUPPORTED_1000baseT_Half))
2077 if (cmd->duplex == DUPLEX_FULL &&
2078 !(f & SUPPORTED_1000baseT_Full))
2085 mutex_lock(&dev->link_lock);
2086 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2088 mutex_unlock(&dev->link_lock);
2091 if (!(f & SUPPORTED_Autoneg))
2094 mutex_lock(&dev->link_lock);
/* Merge requested advertisement with current pause settings */
2095 dev->phy.def->ops->setup_aneg(&dev->phy,
2096 (cmd->advertising & f) |
2097 (dev->phy.advertising &
2099 ADVERTISED_Asym_Pause)));
2100 mutex_unlock(&dev->link_lock);
2102 emac_force_link_update(dev);
/* Report fixed ring sizes — the EMAC rings are not resizable. */
2107 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2108 struct ethtool_ringparam *rp)
2110 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2111 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
/* Report flow-control state under link_lock: autoneg if pause is
 * being advertised; actual rx/tx pause flags are derived from the
 * negotiated pause/asym_pause result, full duplex only.  Some
 * assignments are elided in this extract. */
2114 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2115 struct ethtool_pauseparam *pp)
2117 struct emac_instance *dev = netdev_priv(ndev);
2119 mutex_lock(&dev->link_lock);
2120 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2121 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2124 if (dev->phy.duplex == DUPLEX_FULL) {
2126 pp->rx_pause = pp->tx_pause = 1;
2127 else if (dev->phy.asym_pause)
2130 mutex_unlock(&dev->link_lock);
/* Size of the EMAC's own contribution to an ethtool register dump:
 * one sub-header plus the raw register block. */
2133 static int emac_get_regs_len(struct emac_instance *dev)
2135 return sizeof(struct emac_ethtool_regs_subhdr) +
2136 sizeof(struct emac_regs);
/* Total ethtool register-dump size: header + MAL + EMAC, plus ZMII /
 * RGMII / TAH blocks when those companion devices are present. */
2139 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2141 struct emac_instance *dev = netdev_priv(ndev);
2144 size = sizeof(struct emac_ethtool_regs_hdr) +
2145 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2146 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2147 size += zmii_get_regs_len(dev->zmii_dev);
2148 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2149 size += rgmii_get_regs_len(dev->rgmii_dev);
2150 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2151 size += tah_get_regs_len(dev->tah_dev);
/* Write the EMAC sub-header (cell index + a version tag matching the
 * hardware flavor) followed by a raw copy of the register block into
 * @buf; return the position just past the data for the next dumper. */
2156 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2158 struct emac_ethtool_regs_subhdr *hdr = buf;
2160 hdr->index = dev->cell_index;
2161 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2162 hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2163 } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2164 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2166 hdr->version = EMAC_ETHTOOL_REGS_VER;
2168 memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2169 return (void *)(hdr + 1) + sizeof(struct emac_regs);
/* ethtool get_regs: emit the header, then chain the MAL, EMAC and any
 * present ZMII/RGMII/TAH dumpers, each advancing @buf and setting its
 * component flag in the header. */
2172 static void emac_ethtool_get_regs(struct net_device *ndev,
2173 struct ethtool_regs *regs, void *buf)
2175 struct emac_instance *dev = netdev_priv(ndev);
2176 struct emac_ethtool_regs_hdr *hdr = buf;
2178 hdr->components = 0;
2181 buf = mal_dump_regs(dev->mal, buf);
2182 buf = emac_dump_regs(dev, buf);
2183 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2184 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2185 buf = zmii_dump_regs(dev->zmii_dev, buf);
2187 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2188 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2189 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2191 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2192 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2193 buf = tah_dump_regs(dev->tah_dev, buf);
/* ethtool nway_reset: restart autonegotiation with the current
 * advertisement and force a link update.  Fails when there is no PHY
 * or autoneg is not enabled (error returns elided in this extract). */
2197 static int emac_ethtool_nway_reset(struct net_device *ndev)
2199 struct emac_instance *dev = netdev_priv(ndev);
2202 DBG(dev, "nway_reset" NL);
2204 if (dev->phy.address < 0)
2207 mutex_lock(&dev->link_lock);
2208 if (!dev->phy.autoneg) {
2213 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2215 mutex_unlock(&dev->link_lock);
2216 emac_force_link_update(dev);
/* ethtool get_sset_count: only the statistics string set is exposed. */
2220 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2222 if (stringset == ETH_SS_STATS)
2223 return EMAC_ETHTOOL_STATS_COUNT;
/* ethtool get_strings: copy out the fixed statistics-name table. */
2228 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2231 if (stringset == ETH_SS_STATS)
2232 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
/* ethtool get_ethtool_stats: dump dev->stats followed by dev->estats
 * as a flat u64 array — layout must match emac_stats_keys order. */
2235 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2236 struct ethtool_stats *estats,
2239 struct emac_instance *dev = netdev_priv(ndev);
2241 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2242 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2243 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
/* ethtool get_drvinfo: driver name/version plus a bus string built
 * from the cell index and the device-tree node path. */
2246 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2247 struct ethtool_drvinfo *info)
2249 struct emac_instance *dev = netdev_priv(ndev);
2251 strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2252 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2253 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
2254 dev->cell_index, dev->ofdev->dev.of_node->full_name);
/* ethtool operations table wired to the handlers above. */
2257 static const struct ethtool_ops emac_ethtool_ops = {
2258 .get_settings = emac_ethtool_get_settings,
2259 .set_settings = emac_ethtool_set_settings,
2260 .get_drvinfo = emac_ethtool_get_drvinfo,
2262 .get_regs_len = emac_ethtool_get_regs_len,
2263 .get_regs = emac_ethtool_get_regs,
2265 .nway_reset = emac_ethtool_nway_reset,
2267 .get_ringparam = emac_ethtool_get_ringparam,
2268 .get_pauseparam = emac_ethtool_get_pauseparam,
2270 .get_strings = emac_ethtool_get_strings,
2271 .get_sset_count = emac_ethtool_get_sset_count,
2272 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2274 .get_link = ethtool_op_get_link,
/* ndo_do_ioctl handler implementing the MII ioctls (SIOCGMIIPHY /
 * SIOCGMIIREG / SIOCSMIIREG — the switch itself is elided in this
 * extract) against the attached PHY.  No PHY -> error. */
2277 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2279 struct emac_instance *dev = netdev_priv(ndev);
2280 struct mii_ioctl_data *data = if_mii(rq);
2282 DBG(dev, "ioctl %08x" NL, cmd);
2284 if (dev->phy.address < 0)
2289 data->phy_id = dev->phy.address;
2292 data->val_out = emac_mdio_read(ndev, dev->phy.address,
2297 emac_mdio_write(ndev, dev->phy.address, data->reg_num,
/* One probe-time dependency of an EMAC instance (MAL, ZMII, RGMII,
 * TAH, MDIO, or the previously-probed EMAC): tracks its phandle, the
 * resolved device-tree node, the platform device and (elided here)
 * its driver data. */
2305 struct emac_depentry {
2307 struct device_node *node;
2308 struct platform_device *ofdev;
/* Fixed indices into the dependency array used by emac_check_deps()
 * and emac_wait_deps(). */
2312 #define EMAC_DEP_MAL_IDX 0
2313 #define EMAC_DEP_ZMII_IDX 1
2314 #define EMAC_DEP_RGMII_IDX 2
2315 #define EMAC_DEP_TAH_IDX 3
2316 #define EMAC_DEP_MDIO_IDX 4
2317 #define EMAC_DEP_PREV_IDX 5
2318 #define EMAC_DEP_COUNT 6
/* Progressively resolve each dependency: phandle -> DT node ->
 * platform device -> bound driver data, caching results in @deps so
 * repeated calls (from the wait loop) only do remaining work.
 * The "previous EMAC" entry is special-cased via the boot list.
 * Returns true only when all EMAC_DEP_COUNT entries are satisfied. */
2320 static int emac_check_deps(struct emac_instance *dev,
2321 struct emac_depentry *deps)
2324 struct device_node *np;
2326 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2327 /* no dependency on that item, allright */
2328 if (deps[i].phandle == 0) {
2332 /* special case for blist as the dependency might go away */
2333 if (i == EMAC_DEP_PREV_IDX) {
2334 np = *(dev->blist - 1);
2336 deps[i].phandle = 0;
2340 if (deps[i].node == NULL)
2341 deps[i].node = of_node_get(np);
2343 if (deps[i].node == NULL)
2344 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2345 if (deps[i].node == NULL)
2347 if (deps[i].ofdev == NULL)
2348 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2349 if (deps[i].ofdev == NULL)
2351 if (deps[i].drvdata == NULL)
2352 deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
2353 if (deps[i].drvdata != NULL)
2356 return there == EMAC_DEP_COUNT;
/* Drop the platform-device references taken on the companion devices
 * during probe (of_dev_put tolerates NULL for absent companions). */
2359 static void emac_put_deps(struct emac_instance *dev)
2361 of_dev_put(dev->mal_dev);
2362 of_dev_put(dev->zmii_dev);
2363 of_dev_put(dev->rgmii_dev);
2364 of_dev_put(dev->mdio_dev);
2365 of_dev_put(dev->tah_dev);
/* Platform-bus notifier: wake any EMAC probe sleeping in
 * emac_wait_deps() whenever a driver binds to some device — the
 * waiter re-checks its dependencies itself. */
2368 static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2371 /* We are only intereted in device addition */
2372 if (action == BUS_NOTIFY_BOUND_DRIVER)
2373 wake_up_all(&emac_probe_wait);
2377 static struct notifier_block emac_of_bus_notifier = {
2378 .notifier_call = emac_of_bus_notify
/* Block (with timeout) until all companion devices this EMAC depends
 * on have probed: register a bus notifier so newly-bound drivers
 * re-trigger the check, wait, then transfer the resolved platform
 * devices into dev->*_dev.  Node references are dropped here; device
 * references are kept (released later by emac_put_deps()), except the
 * previous-EMAC entry which is dropped immediately. */
2381 static int emac_wait_deps(struct emac_instance *dev)
2383 struct emac_depentry deps[EMAC_DEP_COUNT];
2386 memset(&deps, 0, sizeof(deps));
2388 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2389 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2390 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2392 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2394 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
/* Dummy non-zero phandle: "wait for the previous EMAC in boot order" */
2395 if (dev->blist && dev->blist > emac_boot_list)
2396 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2397 bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2398 wait_event_timeout(emac_probe_wait,
2399 emac_check_deps(dev, deps),
2400 EMAC_PROBE_DEP_TIMEOUT);
2401 bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2402 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2403 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2404 of_node_put(deps[i].node);
2406 of_dev_put(deps[i].ofdev);
2409 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2410 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2411 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2412 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2413 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2415 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
/* Read a u32 device-tree property into *@val.  When @fatal is set a
 * missing/short property is logged as an error; the store and return
 * statements are elided in this extract. */
2419 static int emac_read_uint_prop(struct device_node *np, const char *name,
2420 u32 *val, int fatal)
2423 const u32 *prop = of_get_property(np, name, &len);
2424 if (prop == NULL || len < sizeof(u32)) {
2426 printk(KERN_ERR "%s: missing %s property\n",
2427 np->full_name, name);
/* Probe-time PHY setup.  Handles three configurations:
 *  - PHY-less (phy-address and phy-map both absent/0xffffffff):
 *    synthesize a fixed feature set and return;
 *  - GPCS: derive the PHY address from gpcs-address or cell index;
 *  - regular MDIO PHY: scan addresses not masked out by phy-map /
 *    busy_phy_map (or just the one given by phy-address) and probe
 *    with emac_mii_phy_probe().
 * The 440GX internal clock source is toggled around the scan via DCR
 * writes.  After probing: apply platform feature exclusions, then
 * either restart autonegotiation or force the highest supported
 * speed/duplex.  Serialized by the global emac_phy_map_lock. */
2434 static int emac_init_phy(struct emac_instance *dev)
2436 struct device_node *np = dev->ofdev->dev.of_node;
2437 struct net_device *ndev = dev->ndev;
2441 dev->phy.dev = ndev;
2442 dev->phy.mode = dev->phy_mode;
2444 /* PHY-less configuration.
2445 * XXX I probably should move these settings to the dev tree
2447 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2450 /* PHY-less configuration.
2451 * XXX I probably should move these settings to the dev tree
2453 dev->phy.address = -1;
2454 dev->phy.features = SUPPORTED_MII;
2455 if (emac_phy_supports_gige(dev->phy_mode))
2456 dev->phy.features |= SUPPORTED_1000baseT_Full;
2458 dev->phy.features |= SUPPORTED_100baseT_Full;
2464 mutex_lock(&emac_phy_map_lock);
2465 phy_map = dev->phy_map | busy_phy_map;
2467 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2469 dev->phy.mdio_read = emac_mdio_read;
2470 dev->phy.mdio_write = emac_mdio_write;
2472 /* Enable internal clock source */
2473 #ifdef CONFIG_PPC_DCR_NATIVE
2474 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2475 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2477 /* PHY clock workaround */
2478 emac_rx_clk_tx(dev);
2480 /* Enable internal clock source on 440GX*/
2481 #ifdef CONFIG_PPC_DCR_NATIVE
2482 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2483 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2485 /* Configure EMAC with defaults so we can at least use MDIO
2486 * This is needed mostly for 440GX
2488 if (emac_phy_gpcs(dev->phy.mode)) {
2490 * Make GPCS PHY address equal to EMAC index.
2491 * We probably should take into account busy_phy_map
2492 * and/or phy_map here.
2494 * Note that the busy_phy_map is currently global
2495 * while it should probably be per-ASIC...
2497 dev->phy.gpcs_address = dev->gpcs_address;
2498 if (dev->phy.gpcs_address == 0xffffffff)
2499 dev->phy.address = dev->cell_index;
2502 emac_configure(dev);
/* Explicit phy-address: restrict the scan to that single address */
2504 if (dev->phy_address != 0xffffffff)
2505 phy_map = ~(1 << dev->phy_address);
2507 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2508 if (!(phy_map & 1)) {
2510 busy_phy_map |= 1 << i;
2512 /* Quick check if there is a PHY at the address */
2513 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2514 if (r == 0xffff || r < 0)
2516 if (!emac_mii_phy_probe(&dev->phy, i))
2520 /* Enable external clock source */
2521 #ifdef CONFIG_PPC_DCR_NATIVE
2522 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2523 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2525 mutex_unlock(&emac_phy_map_lock);
2527 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2532 if (dev->phy.def->ops->init)
2533 dev->phy.def->ops->init(&dev->phy);
2535 /* Disable any PHY features not supported by the platform */
2536 dev->phy.def->features &= ~dev->phy_feat_exc;
2537 dev->phy.features &= ~dev->phy_feat_exc;
2539 /* Setup initial link parameters */
2540 if (dev->phy.features & SUPPORTED_Autoneg) {
2541 adv = dev->phy.features;
2542 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2543 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2544 /* Restart autonegotiation */
2545 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2547 u32 f = dev->phy.def->features;
2548 int speed = SPEED_10, fd = DUPLEX_HALF;
2550 /* Select highest supported speed/duplex */
2551 if (f & SUPPORTED_1000baseT_Full) {
2554 } else if (f & SUPPORTED_1000baseT_Half)
2556 else if (f & SUPPORTED_100baseT_Full) {
2559 } else if (f & SUPPORTED_100baseT_Half)
2561 else if (f & SUPPORTED_10baseT_Full)
2564 /* Force link parameters */
2565 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
/*
 * emac_init_config - populate an emac_instance from its device-tree node
 *
 * Reads the mandatory linkage properties (mal-device, the TX/RX MAL
 * channels, cell-index, and the parent bus "clock-frequency") and a set of
 * optional properties that fall back to built-in defaults, decodes the PHY
 * mode, derives the feature bitmap from the node's "compatible" strings,
 * cross-checks TAH/ZMII/RGMII usage against the kernel configuration,
 * copies the MAC address into the net_device, and selects the IAHT/GAHT
 * hash-table geometry.
 *
 * NOTE(review): this is a lossy excerpt -- the error-return statements and
 * several closing braces between the numbered lines below are not visible
 * here; the mandatory-property reads (4th arg == 1) presumably bail out on
 * failure in the missing lines. Confirm against the full file.
 */
2570 static int emac_init_config(struct emac_instance *dev)
2572 struct device_node *np = dev->ofdev->dev.of_node;
2575 /* Read config from device-tree */
2576 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2578 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2580 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2582 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
/* Optional properties: a failed read leaves the documented default below. */
2584 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2585 dev->max_mtu = 1500;
2586 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2587 dev->rx_fifo_size = 2048;
2588 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2589 dev->tx_fifo_size = 2048;
/* Gige FIFO sizes default to the 10/100 values when not given explicitly. */
2590 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2591 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2592 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2593 dev->tx_fifo_size_gige = dev->tx_fifo_size;
/* 0xffffffff is the driver-wide "not specified" sentinel for addresses/ports. */
2594 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2595 dev->phy_address = 0xffffffff;
2596 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2597 dev->phy_map = 0xffffffff;
2598 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2599 dev->gpcs_address = 0xffffffff;
/* OPB bus frequency is read from the parent node and is mandatory. */
2600 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2602 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2604 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2606 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2608 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2610 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2611 dev->zmii_port = 0xffffffff;
2612 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2614 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2615 dev->rgmii_port = 0xffffffff;
2616 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2617 dev->fifo_entry_size = 16;
2618 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2619 dev->mal_burst_size = 256;
2621 /* PHY mode needs some decoding */
2622 dev->phy_mode = of_get_phy_mode(np);
2623 if (dev->phy_mode < 0)
2624 dev->phy_mode = PHY_MODE_NA;
/*
 * Feature detection: "ibm,emac4sync" implies EMAC4 as well; the nested
 * compatible checks add per-SoC PHY clock workarounds and, for APM821xx,
 * the jumbo-frame and no-half-duplex quirks.
 */
2626 /* Check EMAC version */
2627 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2628 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2629 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2630 of_device_is_compatible(np, "ibm,emac-460gt"))
2631 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2632 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2633 of_device_is_compatible(np, "ibm,emac-405exr"))
2634 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2635 if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2636 dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2637 EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2638 EMAC_FTR_460EX_PHY_CLK_FIX);
2640 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2641 dev->features |= EMAC_FTR_EMAC4;
2642 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2643 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2645 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2646 of_device_is_compatible(np, "ibm,emac-440gr"))
2647 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
/* 405EZ has no flow-control hardware; requires the Kconfig opt-out. */
2648 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2649 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2650 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2652 printk(KERN_ERR "%s: Flow control not disabled!\n",
2660 /* Fixup some feature bits based on the device tree */
2661 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2662 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2663 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2664 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2666 /* CAB lacks the appropriate properties */
2667 if (of_device_is_compatible(np, "ibm,emac-axon"))
2668 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2669 EMAC_FTR_STACR_OC_INVERT;
/*
 * A non-zero TAH/ZMII/RGMII phandle means the board wires the device in;
 * if the matching driver support is compiled out, that is a fatal
 * configuration error (error path in lines not visible here).
 */
2671 /* Enable TAH/ZMII/RGMII features as found */
2672 if (dev->tah_ph != 0) {
2673 #ifdef CONFIG_IBM_EMAC_TAH
2674 dev->features |= EMAC_FTR_HAS_TAH;
2676 printk(KERN_ERR "%s: TAH support not enabled !\n",
2682 if (dev->zmii_ph != 0) {
2683 #ifdef CONFIG_IBM_EMAC_ZMII
2684 dev->features |= EMAC_FTR_HAS_ZMII;
2686 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2692 if (dev->rgmii_ph != 0) {
2693 #ifdef CONFIG_IBM_EMAC_RGMII
2694 dev->features |= EMAC_FTR_HAS_RGMII;
2696 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2702 /* Read MAC-address */
2703 p = of_get_property(np, "local-mac-address", NULL);
2705 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2709 memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* Hash-table geometry differs between EMAC4SYNC and plain EMAC4 cores. */
2711 /* IAHT and GAHT filter parameterization */
2712 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2713 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2714 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2716 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2717 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
/* Dump the final configuration when the driver's DBG facility is enabled. */
2720 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2721 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2722 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2723 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2724 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
/*
 * net_device_ops for non-gige EMACs: plain (non-scatter/gather) transmit
 * via emac_start_xmit and the generic eth_change_mtu MTU handler.
 * Compare with emac_gige_netdev_ops below, which substitutes the SG
 * transmit path and the driver's own MTU handler.
 */
2729 static const struct net_device_ops emac_netdev_ops = {
2730 .ndo_open = emac_open,
2731 .ndo_stop = emac_close,
2732 .ndo_get_stats = emac_stats,
2733 .ndo_set_rx_mode = emac_set_multicast_list,
2734 .ndo_do_ioctl = emac_ioctl,
2735 .ndo_tx_timeout = emac_tx_timeout,
2736 .ndo_validate_addr = eth_validate_addr,
2737 .ndo_set_mac_address = emac_set_mac_address,
2738 .ndo_start_xmit = emac_start_xmit,
2739 .ndo_change_mtu = eth_change_mtu,
/*
 * net_device_ops for gige-capable EMACs: identical to emac_netdev_ops
 * except for the scatter/gather transmit entry point (emac_start_xmit_sg)
 * and the driver-specific MTU handler (emac_change_mtu). Selected in
 * emac_probe() when emac_phy_supports_gige() is true.
 */
2742 static const struct net_device_ops emac_gige_netdev_ops = {
2743 .ndo_open = emac_open,
2744 .ndo_stop = emac_close,
2745 .ndo_get_stats = emac_stats,
2746 .ndo_set_rx_mode = emac_set_multicast_list,
2747 .ndo_do_ioctl = emac_ioctl,
2748 .ndo_tx_timeout = emac_tx_timeout,
2749 .ndo_validate_addr = eth_validate_addr,
2750 .ndo_set_mac_address = emac_set_mac_address,
2751 .ndo_start_xmit = emac_start_xmit_sg,
2752 .ndo_change_mtu = emac_change_mtu,
/*
 * emac_probe - platform-driver probe for one EMAC instance
 *
 * Sequence: skip unused/disabled nodes; allocate the net_device; init
 * locks and work items; read the device-tree config (emac_init_config);
 * map IRQs and registers; wait for dependent MAL/MDIO/ZMII/RGMII/TAH
 * devices; register with MAL and attach the bridge blocks as configured;
 * seed default link parameters; probe the PHY; pick the SG or plain
 * netdev_ops; register the net_device; finally publish drvdata and wake
 * anyone blocked in emac_wait_deps().
 *
 * NOTE(review): lossy excerpt -- the error-unwind labels between lines
 * 2946 and 2977 and several intermediate statements (gotos, returns,
 * closing braces) are not visible here. The unwind order shown
 * (TAH -> RGMII -> ZMII -> commac -> iounmap -> IRQ mappings) mirrors
 * the reverse of acquisition.
 */
2755 static int emac_probe(struct platform_device *ofdev)
2757 struct net_device *ndev;
2758 struct emac_instance *dev;
2759 struct device_node *np = ofdev->dev.of_node;
2760 struct device_node **blist = NULL;
2763 /* Skip unused/unwired EMACS. We leave the check for an unused
2764 * property here for now, but new flat device trees should set a
2765 * status property to "disabled" instead.
2767 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2770 /* Find ourselves in the bootlist if we are there */
2771 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2772 if (emac_boot_list[i] == np)
2773 blist = &emac_boot_list[i];
2775 /* Allocate our net_device structure */
2777 ndev = alloc_etherdev(sizeof(struct emac_instance));
2781 dev = netdev_priv(ndev);
2785 SET_NETDEV_DEV(ndev, &ofdev->dev);
2787 /* Initialize some embedded data structures */
2788 mutex_init(&dev->mdio_lock);
2789 mutex_init(&dev->link_lock);
2790 spin_lock_init(&dev->lock);
2791 INIT_WORK(&dev->reset_work, emac_reset_work);
2793 /* Init various config data based on device-tree */
2794 err = emac_init_config(dev);
2798 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2799 dev->emac_irq = irq_of_parse_and_map(np, 0);
2800 dev->wol_irq = irq_of_parse_and_map(np, 1);
2801 if (!dev->emac_irq) {
2802 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2805 ndev->irq = dev->emac_irq;
2808 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2809 printk(KERN_ERR "%s: Can't get registers address\n",
2813 // TODO : request_mem_region
2814 dev->emacp = ioremap(dev->rsrc_regs.start,
2815 resource_size(&dev->rsrc_regs));
2816 if (dev->emacp == NULL) {
2817 printk(KERN_ERR "%s: Can't map device registers!\n",
2823 /* Wait for dependent devices */
2824 err = emac_wait_deps(dev);
2827 "%s: Timeout waiting for dependent devices\n",
2829 /* display more info about what's missing ? */
2832 dev->mal = platform_get_drvdata(dev->mal_dev);
2833 if (dev->mdio_dev != NULL)
2834 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
2836 /* Register with MAL */
2837 dev->commac.ops = &emac_commac_ops;
2838 dev->commac.dev = dev;
2839 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2840 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2841 err = mal_register_commac(dev->mal, &dev->commac);
2843 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2844 np->full_name, dev->mal_dev->dev.of_node->full_name);
2847 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2848 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
/* BD rings live in MAL-owned memory; we only compute our offsets here. */
2850 /* Get pointers to BD rings */
2852 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2854 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2856 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2857 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2860 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2861 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2862 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2863 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
/* Bridge attachments chain their error unwinds in reverse order below. */
2865 /* Attach to ZMII, if needed */
2866 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2867 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2868 goto err_unreg_commac;
2870 /* Attach to RGMII, if needed */
2871 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2872 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2873 goto err_detach_zmii;
2875 /* Attach to TAH, if needed */
2876 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2877 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2878 goto err_detach_rgmii;
2880 /* Set some link defaults before we can find out real parameters */
2881 dev->phy.speed = SPEED_100;
2882 dev->phy.duplex = DUPLEX_FULL;
2883 dev->phy.autoneg = AUTONEG_DISABLE;
2884 dev->phy.pause = dev->phy.asym_pause = 0;
2885 dev->stop_timeout = STOP_TIMEOUT_100;
2886 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2888 /* Some SoCs like APM821xx does not support Half Duplex mode. */
2889 if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
2890 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
2891 SUPPORTED_100baseT_Half |
2892 SUPPORTED_10baseT_Half);
2895 /* Find PHY if any */
2896 err = emac_init_phy(dev);
2898 goto err_detach_tah;
2901 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2902 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2904 ndev->watchdog_timeo = 5 * HZ;
/* Gige PHY gets the SG transmit path and SG commac ops. */
2905 if (emac_phy_supports_gige(dev->phy_mode)) {
2906 ndev->netdev_ops = &emac_gige_netdev_ops;
2907 dev->commac.ops = &emac_commac_sg_ops;
2909 ndev->netdev_ops = &emac_netdev_ops;
2910 ndev->ethtool_ops = &emac_ethtool_ops;
2912 netif_carrier_off(ndev);
2914 err = register_netdev(ndev);
2916 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2917 np->full_name, err);
2918 goto err_detach_tah;
2921 /* Set our drvdata last as we don't want them visible until we are
2925 platform_set_drvdata(ofdev, dev);
2927 /* There's a new kid in town ! Let's tell everybody */
2928 wake_up_all(&emac_probe_wait);
2931 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2932 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2934 if (dev->phy_mode == PHY_MODE_SGMII)
2935 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2937 if (dev->phy.address >= 0)
2938 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2939 dev->phy.def->name, dev->phy.address);
2941 emac_dbg_register(dev);
/* Error unwind: release resources in reverse order of acquisition. */
2946 /* I have a bad feeling about this ... */
2949 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2950 tah_detach(dev->tah_dev, dev->tah_port);
2952 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2953 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2955 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2956 zmii_detach(dev->zmii_dev, dev->zmii_port);
2958 mal_unregister_commac(dev->mal, &dev->commac);
2962 iounmap(dev->emacp);
2965 irq_dispose_mapping(dev->wol_irq);
2967 irq_dispose_mapping(dev->emac_irq);
2971 /* if we were on the bootlist, remove us as we won't show up and
2972 * wake up all waiters to notify them in case they were waiting
2977 wake_up_all(&emac_probe_wait);
/*
 * emac_remove - platform-driver remove; undo emac_probe in reverse order
 *
 * Unregisters the net_device first so no new traffic arrives, cancels the
 * pending reset work, detaches the TAH/RGMII/ZMII bridges that were
 * attached, releases the PHY address in the global busy_phy_map,
 * unregisters from MAL, tears down debug state and register/IRQ mappings,
 * and frees the net_device.
 *
 * NOTE(review): lossy excerpt -- the opening brace, "return 0;" and a few
 * intermediate lines are not visible here.
 */
2982 static int emac_remove(struct platform_device *ofdev)
2984 struct emac_instance *dev = platform_get_drvdata(ofdev);
2986 DBG(dev, "remove" NL);
2988 unregister_netdev(dev->ndev);
2990 cancel_work_sync(&dev->reset_work);
2992 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2993 tah_detach(dev->tah_dev, dev->tah_port);
2994 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2995 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2996 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2997 zmii_detach(dev->zmii_dev, dev->zmii_port)
/* Free our slot in the shared PHY-address bitmap for future probes. */
2999 busy_phy_map &= ~(1 << dev->phy.address);
3000 DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
3002 mal_unregister_commac(dev->mal, &dev->commac);
3005 emac_dbg_unregister(dev);
3006 iounmap(dev->emacp);
3009 irq_dispose_mapping(dev->wol_irq);
3011 irq_dispose_mapping(dev->emac_irq);
3013 free_netdev(dev->ndev);
/*
 * Device-tree match table: the three compatible strings cover the plain
 * EMAC, EMAC4 and EMAC4SYNC core generations; finer-grained SoC quirks
 * are decoded from additional compatibles in emac_init_config().
 * NOTE(review): lossy excerpt -- the per-entry braces are not visible.
 */
3018 /* XXX Features in here should be replaced by properties... */
3019 static const struct of_device_id emac_match[] =
3023 .compatible = "ibm,emac",
3027 .compatible = "ibm,emac4",
3031 .compatible = "ibm,emac4sync",
3035 MODULE_DEVICE_TABLE(of, emac_match);
/*
 * Platform driver glue tying the OF match table to probe/remove.
 * NOTE(review): lossy excerpt -- the .driver sub-struct lines (name, etc.)
 * are not visible here.
 */
3037 static struct platform_driver emac_driver = {
3040 .of_match_table = emac_match,
3042 .probe = emac_probe,
3043 .remove = emac_remove,
/*
 * emac_make_bootlist - collect all matching EMAC nodes at init time
 *
 * Walks every device-tree node, records up to EMAC_BOOT_LIST_SIZE nodes
 * that match emac_match (skipping ones marked "unused") together with
 * their cell-index, then sorts the list by cell-index so instances come
 * up in a deterministic order. Node references are taken with
 * of_node_get() and released later in emac_exit().
 */
3046 static void __init emac_make_bootlist(void)
3048 struct device_node *np = NULL;
3050 int cell_indices[EMAC_BOOT_LIST_SIZE];
3053 while((np = of_find_all_nodes(np)) != NULL) {
3056 if (of_match_node(emac_match, np) == NULL)
3058 if (of_get_property(np, "unused", NULL))
3060 idx = of_get_property(np, "cell-index", NULL);
3063 cell_indices[i] = *idx;
3064 emac_boot_list[i++] = of_node_get(np);
3065 if (i >= EMAC_BOOT_LIST_SIZE) {
/* O(n^2) sort is fine: the list is tiny and this runs once at boot. */
3072 /* Bubble sort them (doh, what a creative algorithm :-) */
3073 for (i = 0; max > 1 && (i < (max - 1)); i++)
3074 for (j = i; j < max; j++) {
3075 if (cell_indices[i] > cell_indices[j]) {
3076 swap(emac_boot_list[i], emac_boot_list[j]);
3077 swap(cell_indices[i], cell_indices[j]);
/*
 * emac_init - module entry point
 *
 * Prints the driver banner, builds the boot list of EMAC nodes, then
 * registers the platform driver.
 * NOTE(review): lossy excerpt -- the submodule init calls and the error
 * unwind between lines 3094 and 3125 are not visible here.
 */
3082 static int __init emac_init(void)
3086 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3088 /* Init debug stuff */
3091 /* Build EMAC boot list */
3092 emac_make_bootlist();
3094 /* Init submodules */
3107 rc = platform_driver_register(&emac_driver);
/*
 * emac_exit - module exit point
 *
 * Unregisters the platform driver and drops the of_node references taken
 * in emac_make_bootlist().
 * NOTE(review): lossy excerpt -- submodule-exit calls between lines 3129
 * and 3137 are not visible here.
 */
3125 static void __exit emac_exit(void)
3129 platform_driver_unregister(&emac_driver);
3137 /* Destroy EMAC boot list */
3138 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3139 of_node_put(emac_boot_list[i]);
3142 module_init(emac_init);
3143 module_exit(emac_exit);