2 * drivers/net/ethernet/ibm/emac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
42 #include <linux/of_address.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_net.h>
45 #include <linux/slab.h>
47 #include <asm/processor.h>
50 #include <asm/uaccess.h>
52 #include <asm/dcr-regs.h>
57 * Lack of dma_unmap_???? calls is intentional.
59 * API-correct usage requires additional support state information to be
60 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
61 * EMAC design (e.g. TX buffer passed from network stack can be split into
62 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
63 * maintaining such information will add additional overhead.
64 * Current DMA API implementation for 4xx processors only ensures cache coherency
65 * and dma_unmap_???? routines are empty and are likely to stay this way.
66 * I decided to omit dma_unmap_??? calls because I don't want to add additional
67 * complexity just for the sake of following some abstract API, when it doesn't
68 * add any real benefit to the driver. I understand that this decision maybe
69 * controversial, but I really tried to make code API-correct and efficient
70 * at the same time and didn't come up with code I liked :(. --ebs
73 #define DRV_NAME "emac"
74 #define DRV_VERSION "3.54"
75 #define DRV_DESC "PPC 4xx OCP EMAC driver"
77 MODULE_DESCRIPTION(DRV_DESC);
79 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
80 MODULE_LICENSE("GPL");
82 /* minimum number of free TX descriptors required to wake up TX process */
83 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
85 /* If packet size is less than this number, we allocate small skb and copy packet
86 * contents into it instead of just sending original big skb up
88 #define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
90 /* Since multiple EMACs share MDIO lines in various ways, we need
91 * to avoid re-using the same PHY ID in cases where the arch didn't
92 * setup precise phy_map entries
94 * XXX This is something that needs to be reworked as we can have multiple
95 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
96 * probably require in that case to have explicit PHY IDs in the device-tree
98 static u32 busy_phy_map;
99 static DEFINE_MUTEX(emac_phy_map_lock);
101 /* This is the wait queue used to wait on any event related to probe, that
102 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
104 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
106 /* Having stable interface names is a doomed idea. However, it would be nice
107 * if we didn't have completely random interface names at boot too :-) It's
108 * just a matter of making everybody's life easier. Since we are doing
109 * threaded probing, it's a bit harder though. The base idea here is that
110 * we make up a list of all emacs in the device-tree before we register the
111 * driver. Every emac will then wait for the previous one in the list to
112 * initialize before itself. We should also keep that list ordered by
114 * That list is only 4 entries long, meaning that additional EMACs don't
115 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
118 #define EMAC_BOOT_LIST_SIZE 4
119 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
121 /* How long should I wait for dependent devices ? */
122 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
124 /* I don't want to litter system log with timeout errors
125 * when we have brain-damaged PHY.
127 static inline void emac_report_timeout_error(struct emac_instance *dev,
130 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
131 EMAC_FTR_460EX_PHY_CLK_FIX |
132 EMAC_FTR_440EP_PHY_CLK_FIX))
133 DBG(dev, "%s" NL, error);
134 else if (net_ratelimit())
135 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
139 /* EMAC PHY clock workaround:
140 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141 * which allows controlling each EMAC clock
/* 440EP/440GR PHY clock workaround: route the EMAC RX clock from the TX
 * clock (set the per-EMAC SDR0_MFR_ECS bit) while the link is down.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
/* 440EP/440GR PHY clock workaround: restore the default RX clock source
 * (clear the per-EMAC SDR0_MFR_ECS bit) once the link is up.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON HZ
163 #define PHY_POLL_LINK_OFF (HZ / 5)
165 /* Graceful stop timeouts in us.
166 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
168 #define STOP_TIMEOUT_10 1230
169 #define STOP_TIMEOUT_100 124
170 #define STOP_TIMEOUT_1000 13
171 #define STOP_TIMEOUT_1000_JUMBO 73
173 static unsigned char default_mcast_addr[] = {
174 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
177 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
178 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190 "tx_bd_excessive_collisions", "tx_bd_late_collision",
191 "tx_bd_multple_collisions", "tx_bd_single_collision",
192 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
200 static inline int emac_phy_supports_gige(int phy_mode)
202 return phy_mode == PHY_MODE_GMII ||
203 phy_mode == PHY_MODE_RGMII ||
204 phy_mode == PHY_MODE_SGMII ||
205 phy_mode == PHY_MODE_TBI ||
206 phy_mode == PHY_MODE_RTBI;
209 static inline int emac_phy_gpcs(int phy_mode)
211 return phy_mode == PHY_MODE_SGMII ||
212 phy_mode == PHY_MODE_TBI ||
213 phy_mode == PHY_MODE_RTBI;
216 static inline void emac_tx_enable(struct emac_instance *dev)
218 struct emac_regs __iomem *p = dev->emacp;
221 DBG(dev, "tx_enable" NL);
223 r = in_be32(&p->mr0);
224 if (!(r & EMAC_MR0_TXE))
225 out_be32(&p->mr0, r | EMAC_MR0_TXE);
228 static void emac_tx_disable(struct emac_instance *dev)
230 struct emac_regs __iomem *p = dev->emacp;
233 DBG(dev, "tx_disable" NL);
235 r = in_be32(&p->mr0);
236 if (r & EMAC_MR0_TXE) {
237 int n = dev->stop_timeout;
238 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
239 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
244 emac_report_timeout_error(dev, "TX disable timeout");
248 static void emac_rx_enable(struct emac_instance *dev)
250 struct emac_regs __iomem *p = dev->emacp;
253 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
256 DBG(dev, "rx_enable" NL);
258 r = in_be32(&p->mr0);
259 if (!(r & EMAC_MR0_RXE)) {
260 if (unlikely(!(r & EMAC_MR0_RXI))) {
261 /* Wait if previous async disable is still in progress */
262 int n = dev->stop_timeout;
263 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
268 emac_report_timeout_error(dev,
269 "RX disable timeout");
271 out_be32(&p->mr0, r | EMAC_MR0_RXE);
277 static void emac_rx_disable(struct emac_instance *dev)
279 struct emac_regs __iomem *p = dev->emacp;
282 DBG(dev, "rx_disable" NL);
284 r = in_be32(&p->mr0);
285 if (r & EMAC_MR0_RXE) {
286 int n = dev->stop_timeout;
287 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
288 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
293 emac_report_timeout_error(dev, "RX disable timeout");
297 static inline void emac_netif_stop(struct emac_instance *dev)
299 netif_tx_lock_bh(dev->ndev);
300 netif_addr_lock(dev->ndev);
302 netif_addr_unlock(dev->ndev);
303 netif_tx_unlock_bh(dev->ndev);
304 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
305 mal_poll_disable(dev->mal, &dev->commac);
306 netif_tx_disable(dev->ndev);
309 static inline void emac_netif_start(struct emac_instance *dev)
311 netif_tx_lock_bh(dev->ndev);
312 netif_addr_lock(dev->ndev);
314 if (dev->mcast_pending && netif_running(dev->ndev))
315 __emac_set_multicast_list(dev);
316 netif_addr_unlock(dev->ndev);
317 netif_tx_unlock_bh(dev->ndev);
319 netif_wake_queue(dev->ndev);
321 /* NOTE: unconditional netif_wake_queue is only appropriate
322 * so long as all callers are assured to have free tx slots
323 * (taken from tg3... though the case where that is wrong is
324 * not terribly harmful)
326 mal_poll_enable(dev->mal, &dev->commac);
329 static inline void emac_rx_disable_async(struct emac_instance *dev)
331 struct emac_regs __iomem *p = dev->emacp;
334 DBG(dev, "rx_disable_async" NL);
336 r = in_be32(&p->mr0);
337 if (r & EMAC_MR0_RXE)
338 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
341 static int emac_reset(struct emac_instance *dev)
343 struct emac_regs __iomem *p = dev->emacp;
345 bool __maybe_unused try_internal_clock = false;
347 DBG(dev, "reset" NL);
349 if (!dev->reset_failed) {
350 /* 40x erratum suggests stopping RX channel before reset,
353 emac_rx_disable(dev);
354 emac_tx_disable(dev);
357 #ifdef CONFIG_PPC_DCR_NATIVE
360 * PPC460EX/GT Embedded Processor Advanced User's Manual
361 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
362 * Note: The PHY must provide a TX Clk in order to perform a soft reset
363 * of the EMAC. If none is present, select the internal clock
364 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
365 * After a soft reset, select the external clock.
367 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
368 * ethernet cable is not attached. This causes the reset to timeout
369 * and the PHY detection code in emac_init_phy() is unable to
370 * communicate and detect the AR8035-A PHY. As a result, the emac
371 * driver bails out early and the user has no ethernet.
372 * In order to stay compatible with existing configurations, the
373 * driver will temporarily switch to the internal clock, after
374 * the first reset fails.
376 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
377 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
378 dev->phy_map == 0xffffffff)) {
379 /* No PHY: select internal loop clock before reset */
380 dcri_clrset(SDR0, SDR0_ETH_CFG,
381 0, SDR0_ETH_CFG_ECS << dev->cell_index);
383 /* PHY present: select external clock before reset */
384 dcri_clrset(SDR0, SDR0_ETH_CFG,
385 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
390 out_be32(&p->mr0, EMAC_MR0_SRST);
391 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
394 #ifdef CONFIG_PPC_DCR_NATIVE
395 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
396 if (!n && !try_internal_clock) {
397 /* first attempt has timed out. */
399 try_internal_clock = true;
403 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
404 dev->phy_map == 0xffffffff)) {
405 /* No PHY: restore external clock source after reset */
406 dcri_clrset(SDR0, SDR0_ETH_CFG,
407 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
413 dev->reset_failed = 0;
416 emac_report_timeout_error(dev, "reset timeout");
417 dev->reset_failed = 1;
422 static void emac_hash_mc(struct emac_instance *dev)
424 const int regs = EMAC_XAHT_REGS(dev);
425 u32 *gaht_base = emac_gaht_base(dev);
427 struct netdev_hw_addr *ha;
430 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
432 memset(gaht_temp, 0, sizeof (gaht_temp));
434 netdev_for_each_mc_addr(ha, dev->ndev) {
436 DBG2(dev, "mc %pM" NL, ha->addr);
438 slot = EMAC_XAHT_CRC_TO_SLOT(dev,
439 ether_crc(ETH_ALEN, ha->addr));
440 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
441 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
443 gaht_temp[reg] |= mask;
446 for (i = 0; i < regs; i++)
447 out_be32(gaht_base + i, gaht_temp[i]);
450 static inline u32 emac_iff2rmr(struct net_device *ndev)
452 struct emac_instance *dev = netdev_priv(ndev);
455 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
457 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
462 if (ndev->flags & IFF_PROMISC)
464 else if (ndev->flags & IFF_ALLMULTI ||
465 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
467 else if (!netdev_mc_empty(ndev))
470 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
471 r &= ~EMAC4_RMR_MJS_MASK;
472 r |= EMAC4_RMR_MJS(ndev->mtu);
478 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
480 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
482 DBG2(dev, "__emac_calc_base_mr1" NL);
486 ret |= EMAC_MR1_TFS_2K;
489 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
490 dev->ndev->name, tx_size);
495 ret |= EMAC_MR1_RFS_16K;
498 ret |= EMAC_MR1_RFS_4K;
501 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
502 dev->ndev->name, rx_size);
508 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
510 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
511 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
513 DBG2(dev, "__emac4_calc_base_mr1" NL);
517 ret |= EMAC4_MR1_TFS_16K;
520 ret |= EMAC4_MR1_TFS_4K;
523 ret |= EMAC4_MR1_TFS_2K;
526 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
527 dev->ndev->name, tx_size);
532 ret |= EMAC4_MR1_RFS_16K;
535 ret |= EMAC4_MR1_RFS_4K;
538 ret |= EMAC4_MR1_RFS_2K;
541 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
542 dev->ndev->name, rx_size);
548 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
550 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
551 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
552 __emac_calc_base_mr1(dev, tx_size, rx_size);
555 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
557 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
558 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
560 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
563 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
564 unsigned int low, unsigned int high)
566 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
567 return (low << 22) | ( (high & 0x3ff) << 6);
569 return (low << 23) | ( (high & 0x1ff) << 7);
572 static int emac_configure(struct emac_instance *dev)
574 struct emac_regs __iomem *p = dev->emacp;
575 struct net_device *ndev = dev->ndev;
576 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
579 DBG(dev, "configure" NL);
582 out_be32(&p->mr1, in_be32(&p->mr1)
583 | EMAC_MR1_FDE | EMAC_MR1_ILE);
585 } else if (emac_reset(dev) < 0)
588 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
589 tah_reset(dev->tah_dev);
591 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
592 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
594 /* Default fifo sizes */
595 tx_size = dev->tx_fifo_size;
596 rx_size = dev->rx_fifo_size;
598 /* No link, force loopback */
600 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
602 /* Check for full duplex */
603 else if (dev->phy.duplex == DUPLEX_FULL)
604 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
606 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
607 dev->stop_timeout = STOP_TIMEOUT_10;
608 switch (dev->phy.speed) {
610 if (emac_phy_gpcs(dev->phy.mode)) {
611 mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
612 (dev->phy.gpcs_address != 0xffffffff) ?
613 dev->phy.gpcs_address : dev->phy.address);
615 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
616 * identify this GPCS PHY later.
618 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
620 mr1 |= EMAC_MR1_MF_1000;
622 /* Extended fifo sizes */
623 tx_size = dev->tx_fifo_size_gige;
624 rx_size = dev->rx_fifo_size_gige;
626 if (dev->ndev->mtu > ETH_DATA_LEN) {
627 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
628 mr1 |= EMAC4_MR1_JPSM;
630 mr1 |= EMAC_MR1_JPSM;
631 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
633 dev->stop_timeout = STOP_TIMEOUT_1000;
636 mr1 |= EMAC_MR1_MF_100;
637 dev->stop_timeout = STOP_TIMEOUT_100;
639 default: /* make gcc happy */
643 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
644 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
646 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
647 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
649 /* on 40x erratum forces us to NOT use integrated flow control,
650 * let's hope it works on 44x ;)
652 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
653 dev->phy.duplex == DUPLEX_FULL) {
655 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
656 else if (dev->phy.asym_pause)
660 /* Add base settings & fifo sizes & program MR1 */
661 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
662 out_be32(&p->mr1, mr1);
664 /* Set individual MAC address */
665 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
666 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
667 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
670 /* VLAN Tag Protocol ID */
671 out_be32(&p->vtpid, 0x8100);
673 /* Receive mode register */
674 r = emac_iff2rmr(ndev);
675 if (r & EMAC_RMR_MAE)
677 out_be32(&p->rmr, r);
679 /* FIFOs thresholds */
680 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
681 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
682 tx_size / 2 / dev->fifo_entry_size);
684 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
685 tx_size / 2 / dev->fifo_entry_size);
686 out_be32(&p->tmr1, r);
687 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
689 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
690 there should be still enough space in FIFO to allow the our link
691 partner time to process this frame and also time to send PAUSE
694 Here is the worst case scenario for the RX FIFO "headroom"
695 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
697 1) One maximum-length frame on TX 1522 bytes
698 2) One PAUSE frame time 64 bytes
699 3) PAUSE frame decode time allowance 64 bytes
700 4) One maximum-length frame on RX 1522 bytes
701 5) Round-trip propagation delay of the link (100Mb) 15 bytes
705 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
706 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
708 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
709 rx_size / 4 / dev->fifo_entry_size);
710 out_be32(&p->rwmr, r);
712 /* Set PAUSE timer to the maximum */
713 out_be32(&p->ptr, 0xffff);
716 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
717 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
718 EMAC_ISR_IRE | EMAC_ISR_TE;
719 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
720 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
722 out_be32(&p->iser, r);
724 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
725 if (emac_phy_gpcs(dev->phy.mode)) {
726 if (dev->phy.gpcs_address != 0xffffffff)
727 emac_mii_reset_gpcs(&dev->phy);
729 emac_mii_reset_phy(&dev->phy);
735 static void emac_reinitialize(struct emac_instance *dev)
737 DBG(dev, "reinitialize" NL);
739 emac_netif_stop(dev);
740 if (!emac_configure(dev)) {
744 emac_netif_start(dev);
747 static void emac_full_tx_reset(struct emac_instance *dev)
749 DBG(dev, "full_tx_reset" NL);
751 emac_tx_disable(dev);
752 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
753 emac_clean_tx_ring(dev);
754 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
758 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
763 static void emac_reset_work(struct work_struct *work)
765 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
767 DBG(dev, "reset_work" NL);
769 mutex_lock(&dev->link_lock);
771 emac_netif_stop(dev);
772 emac_full_tx_reset(dev);
773 emac_netif_start(dev);
775 mutex_unlock(&dev->link_lock);
778 static void emac_tx_timeout(struct net_device *ndev)
780 struct emac_instance *dev = netdev_priv(ndev);
782 DBG(dev, "tx_timeout" NL);
784 schedule_work(&dev->reset_work);
788 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
790 int done = !!(stacr & EMAC_STACR_OC);
792 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
798 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
800 struct emac_regs __iomem *p = dev->emacp;
802 int n, err = -ETIMEDOUT;
804 mutex_lock(&dev->mdio_lock);
806 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
808 /* Enable proper MDIO port */
809 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
810 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
811 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
812 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
814 /* Wait for management interface to become idle */
816 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
819 DBG2(dev, " -> timeout wait idle\n");
824 /* Issue read command */
825 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
826 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
828 r = EMAC_STACR_BASE(dev->opb_bus_freq);
829 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
831 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
832 r |= EMACX_STACR_STAC_READ;
834 r |= EMAC_STACR_STAC_READ;
835 r |= (reg & EMAC_STACR_PRA_MASK)
836 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
837 out_be32(&p->stacr, r);
839 /* Wait for read to complete */
841 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
844 DBG2(dev, " -> timeout wait complete\n");
849 if (unlikely(r & EMAC_STACR_PHYE)) {
850 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
855 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
857 DBG2(dev, "mdio_read -> %04x" NL, r);
860 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
861 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
862 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
863 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
864 mutex_unlock(&dev->mdio_lock);
866 return err == 0 ? r : err;
869 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
872 struct emac_regs __iomem *p = dev->emacp;
874 int n, err = -ETIMEDOUT;
876 mutex_lock(&dev->mdio_lock);
878 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
880 /* Enable proper MDIO port */
881 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
882 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
883 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
884 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
886 /* Wait for management interface to be idle */
888 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
891 DBG2(dev, " -> timeout wait idle\n");
896 /* Issue write command */
897 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
898 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
900 r = EMAC_STACR_BASE(dev->opb_bus_freq);
901 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
903 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
904 r |= EMACX_STACR_STAC_WRITE;
906 r |= EMAC_STACR_STAC_WRITE;
907 r |= (reg & EMAC_STACR_PRA_MASK) |
908 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
909 (val << EMAC_STACR_PHYD_SHIFT);
910 out_be32(&p->stacr, r);
912 /* Wait for write to complete */
914 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
917 DBG2(dev, " -> timeout wait complete\n");
923 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
924 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
925 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
926 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
927 mutex_unlock(&dev->mdio_lock);
930 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
932 struct emac_instance *dev = netdev_priv(ndev);
935 res = __emac_mdio_read((dev->mdio_instance &&
936 dev->phy.gpcs_address != id) ?
937 dev->mdio_instance : dev,
942 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
944 struct emac_instance *dev = netdev_priv(ndev);
946 __emac_mdio_write((dev->mdio_instance &&
947 dev->phy.gpcs_address != id) ?
948 dev->mdio_instance : dev,
949 (u8) id, (u8) reg, (u16) val);
953 static void __emac_set_multicast_list(struct emac_instance *dev)
955 struct emac_regs __iomem *p = dev->emacp;
956 u32 rmr = emac_iff2rmr(dev->ndev);
958 DBG(dev, "__multicast %08x" NL, rmr);
960 /* I decided to relax register access rules here to avoid
963 * There is a real problem with EMAC4 core if we use MWSW_001 bit
964 * in MR1 register and do a full EMAC reset.
965 * One TX BD status update is delayed and, after EMAC reset, it
966 * never happens, resulting in TX hung (it'll be recovered by TX
967 * timeout handler eventually, but this is just gross).
968 * So we either have to do full TX reset or try to cheat here :)
970 * The only required change is to RX mode register, so I *think* all
971 * we need is just to stop RX channel. This seems to work on all
974 * If we need the full reset, we might just trigger the workqueue
975 * and do it async... a bit nasty but should work --BenH
977 dev->mcast_pending = 0;
978 emac_rx_disable(dev);
979 if (rmr & EMAC_RMR_MAE)
981 out_be32(&p->rmr, rmr);
986 static void emac_set_multicast_list(struct net_device *ndev)
988 struct emac_instance *dev = netdev_priv(ndev);
990 DBG(dev, "multicast" NL);
992 BUG_ON(!netif_running(dev->ndev));
995 dev->mcast_pending = 1;
998 __emac_set_multicast_list(dev);
1001 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
1003 int rx_sync_size = emac_rx_sync_size(new_mtu);
1004 int rx_skb_size = emac_rx_skb_size(new_mtu);
1006 int mr1_jumbo_bit_change = 0;
1008 mutex_lock(&dev->link_lock);
1009 emac_netif_stop(dev);
1010 emac_rx_disable(dev);
1011 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1013 if (dev->rx_sg_skb) {
1014 ++dev->estats.rx_dropped_resize;
1015 dev_kfree_skb(dev->rx_sg_skb);
1016 dev->rx_sg_skb = NULL;
1019 /* Make a first pass over RX ring and mark BDs ready, dropping
1020 * non-processed packets on the way. We need this as a separate pass
1021 * to simplify error recovery in the case of allocation failure later.
1023 for (i = 0; i < NUM_RX_BUFF; ++i) {
1024 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
1025 ++dev->estats.rx_dropped_resize;
1027 dev->rx_desc[i].data_len = 0;
1028 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
1029 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1032 /* Reallocate RX ring only if bigger skb buffers are required */
1033 if (rx_skb_size <= dev->rx_skb_size)
1036 /* Second pass, allocate new skbs */
1037 for (i = 0; i < NUM_RX_BUFF; ++i) {
1038 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
1044 BUG_ON(!dev->rx_skb[i]);
1045 dev_kfree_skb(dev->rx_skb[i]);
1047 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1048 dev->rx_desc[i].data_ptr =
1049 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
1050 DMA_FROM_DEVICE) + 2;
1051 dev->rx_skb[i] = skb;
1054 /* Check if we need to change "Jumbo" bit in MR1 */
1055 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
1056 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
1057 (dev->ndev->mtu > ETH_DATA_LEN);
1059 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
1060 (dev->ndev->mtu > ETH_DATA_LEN);
1063 if (mr1_jumbo_bit_change) {
1064 /* This is to prevent starting RX channel in emac_rx_enable() */
1065 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1067 dev->ndev->mtu = new_mtu;
1068 emac_full_tx_reset(dev);
1071 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1074 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1076 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1077 emac_rx_enable(dev);
1078 emac_netif_start(dev);
1079 mutex_unlock(&dev->link_lock);
1084 /* Process ctx, rtnl_lock semaphore */
1085 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1087 struct emac_instance *dev = netdev_priv(ndev);
1090 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1093 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1095 if (netif_running(ndev)) {
1096 /* Check if we really need to reinitialize RX ring */
1097 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1098 ret = emac_resize_rx_ring(dev, new_mtu);
1102 ndev->mtu = new_mtu;
1103 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1104 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1110 static void emac_clean_tx_ring(struct emac_instance *dev)
1114 for (i = 0; i < NUM_TX_BUFF; ++i) {
1115 if (dev->tx_skb[i]) {
1116 dev_kfree_skb(dev->tx_skb[i]);
1117 dev->tx_skb[i] = NULL;
1118 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1119 ++dev->estats.tx_dropped;
1121 dev->tx_desc[i].ctrl = 0;
1122 dev->tx_desc[i].data_ptr = 0;
1126 static void emac_clean_rx_ring(struct emac_instance *dev)
1130 for (i = 0; i < NUM_RX_BUFF; ++i)
1131 if (dev->rx_skb[i]) {
1132 dev->rx_desc[i].ctrl = 0;
1133 dev_kfree_skb(dev->rx_skb[i]);
1134 dev->rx_skb[i] = NULL;
1135 dev->rx_desc[i].data_ptr = 0;
1138 if (dev->rx_sg_skb) {
1139 dev_kfree_skb(dev->rx_sg_skb);
1140 dev->rx_sg_skb = NULL;
1144 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1147 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1151 dev->rx_skb[slot] = skb;
1152 dev->rx_desc[slot].data_len = 0;
1154 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1155 dev->rx_desc[slot].data_ptr =
1156 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1157 DMA_FROM_DEVICE) + 2;
1159 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1160 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1165 static void emac_print_link_status(struct emac_instance *dev)
1167 if (netif_carrier_ok(dev->ndev))
1168 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1169 dev->ndev->name, dev->phy.speed,
1170 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1171 dev->phy.pause ? ", pause enabled" :
1172 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1174 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
/*
 * ndo_open handler: bring the interface up.
 *
 * Sequence: request the EMAC error IRQ, populate the RX ring, reset the
 * ring indices, then (under link_lock) either start periodic PHY link
 * polling via link_work or — with no PHY — assume the carrier is up.
 * Finally configure the EMAC, attach to MAL NAPI polling, enable the
 * TX/RX MAL channels and start the netif queue.
 * NOTE(review): excerpt — the error-unwind labels between the happy
 * path and the cleanup at the bottom (emac_clean_rx_ring/free_irq)
 * are elided from this view.
 */
1177 /* Process ctx, rtnl_lock semaphore */
1178 static int emac_open(struct net_device *ndev)
1180 struct emac_instance *dev = netdev_priv(ndev);
1183 DBG(dev, "open" NL);
1185 /* Setup error IRQ handler */
1186 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1188 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1189 ndev->name, dev->emac_irq);
1193 /* Allocate RX ring */
1194 for (i = 0; i < NUM_RX_BUFF; ++i)
1195 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1196 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1201 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1202 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1203 dev->rx_sg_skb = NULL;
1205 mutex_lock(&dev->link_lock);
1208 /* Start PHY polling now.
1210 if (dev->phy.address >= 0) {
1211 int link_poll_interval;
1212 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1213 dev->phy.def->ops->read_link(&dev->phy);
1214 emac_rx_clk_default(dev);
1215 netif_carrier_on(dev->ndev);
1216 link_poll_interval = PHY_POLL_LINK_ON;
1218 emac_rx_clk_tx(dev);
1219 netif_carrier_off(dev->ndev);
1220 link_poll_interval = PHY_POLL_LINK_OFF;
1222 dev->link_polling = 1;
1224 schedule_delayed_work(&dev->link_work, link_poll_interval);
1225 emac_print_link_status(dev);
1227 netif_carrier_on(dev->ndev);
1229 /* Required for Pause packet support in EMAC */
1230 dev_mc_add_global(ndev, default_mcast_addr);
1232 emac_configure(dev);
1233 mal_poll_add(dev->mal, &dev->commac);
1234 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1235 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1236 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1237 emac_tx_enable(dev);
1238 emac_rx_enable(dev);
1239 emac_netif_start(dev);
1241 mutex_unlock(&dev->link_lock);
1245 emac_clean_rx_ring(dev);
1246 free_irq(dev->emac_irq, dev);
/*
 * Compare the link parameters currently programmed into the EMAC MR1
 * register (duplex, speed, pause flags) against the state cached in
 * dev->phy.  Returns non-zero when they differ, i.e. the MAC needs to
 * be reconfigured to match the PHY.
 * NOTE(review): excerpt — the speed assignments and the remaining
 * pause/asym_pause switch cases are elided from this view.
 */
1253 static int emac_link_differs(struct emac_instance *dev)
1255 u32 r = in_be32(&dev->emacp->mr1);
1257 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1258 int speed, pause, asym_pause;
1260 if (r & EMAC_MR1_MF_1000)
1262 else if (r & EMAC_MR1_MF_100)
1267 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1268 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1277 pause = asym_pause = 0;
1279 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1280 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
/*
 * Periodic PHY link poller (delayed work, serialized by link_lock).
 *
 * Link came up: re-read the link parameters, raise the carrier and do a
 * full TX reset (netif stopped around it) so the MAC matches the new
 * speed/duplex.  Link went down: drop the carrier, disable TX and
 * reinitialize.  In both cases the work reschedules itself with the
 * appropriate poll interval (fast when down, slow when up).
 */
1284 static void emac_link_timer(struct work_struct *work)
1286 struct emac_instance *dev =
1287 container_of(to_delayed_work(work),
1288 struct emac_instance, link_work);
1289 int link_poll_interval;
1291 mutex_lock(&dev->link_lock);
1292 DBG2(dev, "link timer" NL);
1297 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1298 if (!netif_carrier_ok(dev->ndev)) {
1299 emac_rx_clk_default(dev);
1300 /* Get new link parameters */
1301 dev->phy.def->ops->read_link(&dev->phy);
1303 netif_carrier_on(dev->ndev);
1304 emac_netif_stop(dev);
1305 emac_full_tx_reset(dev);
1306 emac_netif_start(dev);
1307 emac_print_link_status(dev);
1309 link_poll_interval = PHY_POLL_LINK_ON;
1311 if (netif_carrier_ok(dev->ndev)) {
1312 emac_rx_clk_tx(dev);
1313 netif_carrier_off(dev->ndev);
1314 netif_tx_disable(dev->ndev);
1315 emac_reinitialize(dev);
1316 emac_print_link_status(dev);
1318 link_poll_interval = PHY_POLL_LINK_OFF;
1320 schedule_delayed_work(&dev->link_work, link_poll_interval);
1322 mutex_unlock(&dev->link_lock);
/*
 * Force the link state machine to re-evaluate soon: drop the carrier
 * and, if polling is active, cancel the pending link work and requeue
 * it with the short "link off" interval.  Used after PHY settings are
 * changed (ethtool set_settings / nway_reset).
 */
1325 static void emac_force_link_update(struct emac_instance *dev)
1327 netif_carrier_off(dev->ndev);
1329 if (dev->link_polling) {
1330 cancel_delayed_work_sync(&dev->link_work);
1331 if (dev->link_polling)
1332 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
/*
 * ndo_stop handler: reverse of emac_open().  Stop PHY link polling,
 * quiesce the netif (under link_lock), disable EMAC RX/TX and the MAL
 * channels, detach from MAL NAPI polling, release both rings' buffers,
 * free the error IRQ and drop the carrier.
 */
1336 /* Process ctx, rtnl_lock semaphore */
1337 static int emac_close(struct net_device *ndev)
1339 struct emac_instance *dev = netdev_priv(ndev);
1341 DBG(dev, "close" NL);
1343 if (dev->phy.address >= 0) {
1344 dev->link_polling = 0;
1345 cancel_delayed_work_sync(&dev->link_work);
1347 mutex_lock(&dev->link_lock);
1348 emac_netif_stop(dev);
1350 mutex_unlock(&dev->link_lock);
1352 emac_rx_disable(dev);
1353 emac_tx_disable(dev);
1354 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1355 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1356 mal_poll_del(dev->mal, &dev->commac);
1358 emac_clean_tx_ring(dev);
1359 emac_clean_rx_ring(dev);
1361 free_irq(dev->emac_irq, dev);
1363 netif_carrier_off(ndev);
/*
 * Return the TX descriptor control bit requesting TAH hardware checksum
 * offload when the chip has a TAH and the skb asked for partial csum;
 * otherwise 0 (fall-through return elided in this excerpt).
 */
1368 static inline u16 emac_tx_csum(struct emac_instance *dev,
1369 struct sk_buff *skb)
1371 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1372 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1373 ++dev->stats.tx_packets_csum;
1374 return EMAC_TX_CTRL_TAH_CSUM;
/*
 * Common tail of both xmit paths: kick the transmitter by writing TMR0
 * (EMAC4 vs classic variant), stop the queue when the ring fills up,
 * bump TX statistics and report NETDEV_TX_OK.
 */
1379 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1381 struct emac_regs __iomem *p = dev->emacp;
1382 struct net_device *ndev = dev->ndev;
1384 /* Send the packet out. If the if makes a significant perf
1385 * difference, then we can store the TMR0 value in "dev"
1388 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1389 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1391 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1393 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1394 netif_stop_queue(ndev);
1395 DBG2(dev, "stopped TX queue" NL);
1398 ndev->trans_start = jiffies;
1399 ++dev->stats.tx_packets;
1400 dev->stats.tx_bytes += len;
1402 return NETDEV_TX_OK;
/*
 * ndo_start_xmit for the non-SG case: place the whole skb in a single
 * TX descriptor (GFCS|GP|READY|LAST plus optional TAH csum bit), set
 * WRAP when the last ring slot is used, DMA-map the data and hand off
 * to emac_xmit_finish() to kick the hardware.
 */
1406 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1408 struct emac_instance *dev = netdev_priv(ndev);
1409 unsigned int len = skb->len;
1412 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1413 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1415 slot = dev->tx_slot++;
1416 if (dev->tx_slot == NUM_TX_BUFF) {
1418 ctrl |= MAL_TX_CTRL_WRAP;
1421 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1423 dev->tx_skb[slot] = skb;
1424 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1427 dev->tx_desc[slot].data_len = (u16) len;
1429 dev->tx_desc[slot].ctrl = ctrl;
1431 return emac_xmit_finish(dev, len);
/*
 * Helper for the SG xmit path: spread a DMA region (@pd, @len) across
 * successive TX ring slots in MAL_MAX_TX_SIZE-sized chunks, setting
 * LAST on the final chunk when @last and WRAP on the last ring slot.
 * Returns the slot used for the final chunk (loop structure partly
 * elided in this excerpt).
 */
1434 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1435 u32 pd, int len, int last, u16 base_ctrl)
1438 u16 ctrl = base_ctrl;
1439 int chunk = min(len, MAL_MAX_TX_SIZE);
1442 slot = (slot + 1) % NUM_TX_BUFF;
1445 ctrl |= MAL_TX_CTRL_LAST;
1446 if (slot == NUM_TX_BUFF - 1)
1447 ctrl |= MAL_TX_CTRL_WRAP;
1449 dev->tx_skb[slot] = NULL;
1450 dev->tx_desc[slot].data_ptr = pd;
1451 dev->tx_desc[slot].data_len = (u16) chunk;
1452 dev->tx_desc[slot].ctrl = ctrl;
/*
 * ndo_start_xmit for TAH-equipped (scatter-gather capable) EMACs.
 *
 * Fast path: linear skbs that fit one MAL chunk go straight to
 * emac_start_xmit().  Otherwise the head and each page fragment are
 * mapped and split over the ring via emac_xmit_split().  Slot demand is
 * only *estimated* up front, so a mid-flight shortage falls into the
 * undo path: zero the descriptors written so far, count tx_undo, stop
 * the queue and return NETDEV_TX_BUSY.  The skb is attached to the
 * LAST slot so completion does not free it before all BDs are done;
 * the first BD's ctrl (READY) is written last to publish the chain.
 */
1463 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1464 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1466 struct emac_instance *dev = netdev_priv(ndev);
1467 int nr_frags = skb_shinfo(skb)->nr_frags;
1468 int len = skb->len, chunk;
1473 /* This is common "fast" path */
1474 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1475 return emac_start_xmit(skb, ndev);
1477 len -= skb->data_len;
1479 /* Note, this is only an *estimation*, we can still run out of empty
1480 * slots because of the additional fragmentation into
1481 * MAL_MAX_TX_SIZE-sized chunks
1483 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1486 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1487 emac_tx_csum(dev, skb);
1488 slot = dev->tx_slot;
1491 dev->tx_skb[slot] = NULL;
1492 chunk = min(len, MAL_MAX_TX_SIZE);
1493 dev->tx_desc[slot].data_ptr = pd =
1494 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1495 dev->tx_desc[slot].data_len = (u16) chunk;
1498 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1501 for (i = 0; i < nr_frags; ++i) {
1502 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1503 len = skb_frag_size(frag);
1505 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1508 pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
1511 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1515 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1517 /* Attach skb to the last slot so we don't release it too early */
1518 dev->tx_skb[slot] = skb;
1520 /* Send the packet out */
1521 if (dev->tx_slot == NUM_TX_BUFF - 1)
1522 ctrl |= MAL_TX_CTRL_WRAP;
1524 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1525 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1527 return emac_xmit_finish(dev, skb->len);
1530 /* Well, too bad. Our previous estimation was overly optimistic.
1533 while (slot != dev->tx_slot) {
1534 dev->tx_desc[slot].ctrl = 0;
1537 slot = NUM_TX_BUFF - 1;
1539 ++dev->estats.tx_undo;
1542 netif_stop_queue(ndev);
1543 DBG2(dev, "stopped TX queue" NL);
1544 return NETDEV_TX_BUSY;
/*
 * Decode a bad TX buffer-descriptor status word into the per-device
 * error statistics — one counter per EMAC_TX_ST_* condition (bad FCS,
 * carrier loss, deferral, collisions, underrun, SQE).
 */
1548 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1550 struct emac_error_stats *st = &dev->estats;
1552 DBG(dev, "BD TX error %04x" NL, ctrl);
1555 if (ctrl & EMAC_TX_ST_BFCS)
1556 ++st->tx_bd_bad_fcs;
1557 if (ctrl & EMAC_TX_ST_LCS)
1558 ++st->tx_bd_carrier_loss;
1559 if (ctrl & EMAC_TX_ST_ED)
1560 ++st->tx_bd_excessive_deferral;
1561 if (ctrl & EMAC_TX_ST_EC)
1562 ++st->tx_bd_excessive_collisions;
1563 if (ctrl & EMAC_TX_ST_LC)
1564 ++st->tx_bd_late_collision;
1565 if (ctrl & EMAC_TX_ST_MC)
1566 ++st->tx_bd_multple_collisions;
1567 if (ctrl & EMAC_TX_ST_SC)
1568 ++st->tx_bd_single_collision;
1569 if (ctrl & EMAC_TX_ST_UR)
1570 ++st->tx_bd_underrun;
1571 if (ctrl & EMAC_TX_ST_SQE)
/*
 * MAL TX completion callback: under the netif tx lock, walk the ring
 * from ack_slot reclaiming descriptors the hardware has released
 * (READY cleared), free the attached skbs, record any bad-status BDs
 * (TAH-aware bad mask), then wake the queue once occupancy drops below
 * EMAC_TX_WAKEUP_THRESH.
 */
1575 static void emac_poll_tx(void *param)
1577 struct emac_instance *dev = param;
1580 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1582 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1583 bad_mask = EMAC_IS_BAD_TX_TAH;
1585 bad_mask = EMAC_IS_BAD_TX;
1587 netif_tx_lock_bh(dev->ndev);
1590 int slot = dev->ack_slot, n = 0;
1592 ctrl = dev->tx_desc[slot].ctrl;
1593 if (!(ctrl & MAL_TX_CTRL_READY)) {
1594 struct sk_buff *skb = dev->tx_skb[slot];
1599 dev->tx_skb[slot] = NULL;
1601 slot = (slot + 1) % NUM_TX_BUFF;
1603 if (unlikely(ctrl & bad_mask))
1604 emac_parse_tx_error(dev, ctrl);
1610 dev->ack_slot = slot;
1611 if (netif_queue_stopped(dev->ndev) &&
1612 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1613 netif_wake_queue(dev->ndev);
1615 DBG2(dev, "tx %d pkts" NL, n);
1618 netif_tx_unlock_bh(dev->ndev);
/*
 * Give an already-attached RX skb back to the hardware: re-map its
 * buffer (same data - 2 alignment trick as emac_alloc_rx_skb) and mark
 * the descriptor EMPTY again, with WRAP on the last ring slot.
 */
1621 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1624 struct sk_buff *skb = dev->rx_skb[slot];
1626 DBG2(dev, "recycle %d %d" NL, slot, len);
1629 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1630 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1632 dev->rx_desc[slot].data_len = 0;
1634 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1635 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/*
 * Decode a bad RX buffer-descriptor status word into the per-device
 * error statistics — one counter per EMAC_RX_ST_* condition (overrun,
 * bad/runt packet, short event, alignment, FCS, length errors).
 */
1638 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1640 struct emac_error_stats *st = &dev->estats;
1642 DBG(dev, "BD RX error %04x" NL, ctrl);
1645 if (ctrl & EMAC_RX_ST_OE)
1646 ++st->rx_bd_overrun;
1647 if (ctrl & EMAC_RX_ST_BP)
1648 ++st->rx_bd_bad_packet;
1649 if (ctrl & EMAC_RX_ST_RP)
1650 ++st->rx_bd_runt_packet;
1651 if (ctrl & EMAC_RX_ST_SE)
1652 ++st->rx_bd_short_event;
1653 if (ctrl & EMAC_RX_ST_AE)
1654 ++st->rx_bd_alignment_error;
1655 if (ctrl & EMAC_RX_ST_BFCS)
1656 ++st->rx_bd_bad_fcs;
1657 if (ctrl & EMAC_RX_ST_PTL)
1658 ++st->rx_bd_packet_too_long;
1659 if (ctrl & EMAC_RX_ST_ORE)
1660 ++st->rx_bd_out_of_range;
1661 if (ctrl & EMAC_RX_ST_IRE)
1662 ++st->rx_bd_in_range;
/*
 * If a TAH is present and the BD status carries no checksum error bits
 * (ctrl == 0 after masking by the caller), mark the skb's checksum as
 * verified by hardware.  Compiled out without CONFIG_IBM_EMAC_TAH.
 */
1665 static inline void emac_rx_csum(struct emac_instance *dev,
1666 struct sk_buff *skb, u16 ctrl)
1668 #ifdef CONFIG_IBM_EMAC_TAH
1669 if (!ctrl && dev->tah_dev) {
1670 skb->ip_summed = CHECKSUM_UNNECESSARY;
1671 ++dev->stats.rx_packets_csum;
/*
 * Append a continuation BD's data to the in-progress scatter-gather
 * packet (dev->rx_sg_skb).  Drops the whole packet when the total would
 * exceed rx_skb_size; in every path the ring slot is recycled.  Return
 * value semantics (success/failure for the caller's LAST-BD check) are
 * partly elided in this excerpt.
 */
1676 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1678 if (likely(dev->rx_sg_skb != NULL)) {
1679 int len = dev->rx_desc[slot].data_len;
1680 int tot_len = dev->rx_sg_skb->len + len;
1682 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1683 ++dev->estats.rx_dropped_mtu;
1684 dev_kfree_skb(dev->rx_sg_skb);
1685 dev->rx_sg_skb = NULL;
1687 memcpy(skb_tail_pointer(dev->rx_sg_skb),
1688 dev->rx_skb[slot]->data, len);
1689 skb_put(dev->rx_sg_skb, len);
1690 emac_recycle_rx_skb(dev, slot, len);
1694 emac_recycle_rx_skb(dev, slot, 0);
/*
 * NAPI RX poll: consume up to @budget buffer descriptors starting at
 * dev->rx_slot.
 *
 * Single-BD packets take the fast path: bad-status BDs are counted and
 * recycled; short frames (< ETH_HLEN) dropped; small frames below
 * EMAC_RX_COPY_THRESH are copied into a fresh skb so the ring buffer
 * can be recycled ("copy-break"); otherwise a replacement skb is
 * allocated and the full-size one is passed up the stack.
 *
 * Multi-BD packets are reassembled via rx_sg_skb: FIRST starts a new
 * sg packet, middles are appended by emac_rx_sg_append(), and LAST
 * finishes it (error check partly elided in this excerpt).
 *
 * If the channel was stopped (MAL_COMMAC_RX_STOPPED) and there is
 * still work, any partial sg packet is discarded and the MAL RX
 * channel plus EMAC RX are re-enabled.  OOM at any allocation point
 * drops the packet and recycles the slot.
 */
1698 /* NAPI poll context */
1699 static int emac_poll_rx(void *param, int budget)
1701 struct emac_instance *dev = param;
1702 int slot = dev->rx_slot, received = 0;
1704 DBG2(dev, "poll_rx(%d)" NL, budget);
1707 while (budget > 0) {
1709 struct sk_buff *skb;
1710 u16 ctrl = dev->rx_desc[slot].ctrl;
1712 if (ctrl & MAL_RX_CTRL_EMPTY)
1715 skb = dev->rx_skb[slot];
1717 len = dev->rx_desc[slot].data_len;
1719 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1722 ctrl &= EMAC_BAD_RX_MASK;
1723 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1724 emac_parse_rx_error(dev, ctrl);
1725 ++dev->estats.rx_dropped_error;
1726 emac_recycle_rx_skb(dev, slot, 0);
1731 if (len < ETH_HLEN) {
1732 ++dev->estats.rx_dropped_stack;
1733 emac_recycle_rx_skb(dev, slot, len);
1737 if (len && len < EMAC_RX_COPY_THRESH) {
1738 struct sk_buff *copy_skb =
1739 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1740 if (unlikely(!copy_skb))
1743 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1744 memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
1745 emac_recycle_rx_skb(dev, slot, len);
1747 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1752 skb->protocol = eth_type_trans(skb, dev->ndev);
1753 emac_rx_csum(dev, skb, ctrl);
1755 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1756 ++dev->estats.rx_dropped_stack;
1758 ++dev->stats.rx_packets;
1760 dev->stats.rx_bytes += len;
1761 slot = (slot + 1) % NUM_RX_BUFF;
1766 if (ctrl & MAL_RX_CTRL_FIRST) {
1767 BUG_ON(dev->rx_sg_skb);
1768 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1769 DBG(dev, "rx OOM %d" NL, slot);
1770 ++dev->estats.rx_dropped_oom;
1771 emac_recycle_rx_skb(dev, slot, 0);
1773 dev->rx_sg_skb = skb;
1776 } else if (!emac_rx_sg_append(dev, slot) &&
1777 (ctrl & MAL_RX_CTRL_LAST)) {
1779 skb = dev->rx_sg_skb;
1780 dev->rx_sg_skb = NULL;
1782 ctrl &= EMAC_BAD_RX_MASK;
1783 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1784 emac_parse_rx_error(dev, ctrl);
1785 ++dev->estats.rx_dropped_error;
1793 DBG(dev, "rx OOM %d" NL, slot);
1794 /* Drop the packet and recycle skb */
1795 ++dev->estats.rx_dropped_oom;
1796 emac_recycle_rx_skb(dev, slot, 0);
1801 DBG2(dev, "rx %d BDs" NL, received);
1802 dev->rx_slot = slot;
1805 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1807 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1808 DBG2(dev, "rx restart" NL);
1813 if (dev->rx_sg_skb) {
1814 DBG2(dev, "dropping partial rx packet" NL);
1815 ++dev->estats.rx_dropped_error;
1816 dev_kfree_skb(dev->rx_sg_skb);
1817 dev->rx_sg_skb = NULL;
1820 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1821 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1822 emac_rx_enable(dev);
/*
 * Return non-zero when the next RX descriptor holds a received frame
 * (EMPTY bit clear), i.e. NAPI has work to do.
 */
1828 /* NAPI poll context */
1829 static int emac_peek_rx(void *param)
1831 struct emac_instance *dev = param;
1833 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
/*
 * Scatter-gather variant of emac_peek_rx(): a multi-BD packet only
 * counts as ready once a descriptor with LAST is found, so scan
 * forward from rx_slot until EMPTY (no work), LAST (work), or — as a
 * full-ring safety net — we wrap back to the start.  The loop
 * construct and return values are partly elided in this excerpt.
 */
1836 /* NAPI poll context */
1837 static int emac_peek_rx_sg(void *param)
1839 struct emac_instance *dev = param;
1841 int slot = dev->rx_slot;
1843 u16 ctrl = dev->rx_desc[slot].ctrl;
1844 if (ctrl & MAL_RX_CTRL_EMPTY)
1846 else if (ctrl & MAL_RX_CTRL_LAST)
1849 slot = (slot + 1) % NUM_RX_BUFF;
1851 /* I'm just being paranoid here :) */
1852 if (unlikely(slot == dev->rx_slot))
/*
 * MAL RX-descriptor-error callback: count the stall and disable RX
 * asynchronously; the poll loop re-enables it once descriptors are
 * available again.
 */
1858 static void emac_rxde(void *param)
1860 struct emac_instance *dev = param;
1862 ++dev->estats.rx_stopped;
1863 emac_rx_disable_async(dev);
/*
 * EMAC error interrupt handler: read and acknowledge the ISR (write-
 * one-to-clear), then translate each status bit into the matching
 * error-statistics counter.  Serialized against emac_stats() by
 * dev->lock.  Several counter increments are elided in this excerpt.
 */
1867 static irqreturn_t emac_irq(int irq, void *dev_instance)
1869 struct emac_instance *dev = dev_instance;
1870 struct emac_regs __iomem *p = dev->emacp;
1871 struct emac_error_stats *st = &dev->estats;
1874 spin_lock(&dev->lock);
1876 isr = in_be32(&p->isr);
1877 out_be32(&p->isr, isr);
1879 DBG(dev, "isr = %08x" NL, isr);
1881 if (isr & EMAC4_ISR_TXPE)
1883 if (isr & EMAC4_ISR_RXPE)
1885 if (isr & EMAC4_ISR_TXUE)
1887 if (isr & EMAC4_ISR_RXOE)
1888 ++st->rx_fifo_overrun;
1889 if (isr & EMAC_ISR_OVR)
1891 if (isr & EMAC_ISR_BP)
1892 ++st->rx_bad_packet;
1893 if (isr & EMAC_ISR_RP)
1894 ++st->rx_runt_packet;
1895 if (isr & EMAC_ISR_SE)
1896 ++st->rx_short_event;
1897 if (isr & EMAC_ISR_ALE)
1898 ++st->rx_alignment_error;
1899 if (isr & EMAC_ISR_BFCS)
1901 if (isr & EMAC_ISR_PTLE)
1902 ++st->rx_packet_too_long;
1903 if (isr & EMAC_ISR_ORE)
1904 ++st->rx_out_of_range;
1905 if (isr & EMAC_ISR_IRE)
1907 if (isr & EMAC_ISR_SQE)
1909 if (isr & EMAC_ISR_TE)
1912 spin_unlock(&dev->lock);
/*
 * ndo_get_stats: fold the driver's 64-bit packet counters (dev->stats)
 * and error counters (dev->estats) into the "legacy" unsigned-long
 * net_device_stats, under dev->lock so the IRQ handler cannot update
 * counters mid-snapshot.
 */
1917 static struct net_device_stats *emac_stats(struct net_device *ndev)
1919 struct emac_instance *dev = netdev_priv(ndev);
1920 struct emac_stats *st = &dev->stats;
1921 struct emac_error_stats *est = &dev->estats;
1922 struct net_device_stats *nst = &dev->nstats;
1923 unsigned long flags;
1925 DBG2(dev, "stats" NL);
1927 /* Compute "legacy" statistics */
1928 spin_lock_irqsave(&dev->lock, flags);
1929 nst->rx_packets = (unsigned long)st->rx_packets;
1930 nst->rx_bytes = (unsigned long)st->rx_bytes;
1931 nst->tx_packets = (unsigned long)st->tx_packets;
1932 nst->tx_bytes = (unsigned long)st->tx_bytes;
1933 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1934 est->rx_dropped_error +
1935 est->rx_dropped_resize +
1936 est->rx_dropped_mtu);
1937 nst->tx_dropped = (unsigned long)est->tx_dropped;
1939 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1940 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1941 est->rx_fifo_overrun +
1943 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1944 est->rx_alignment_error);
1945 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1947 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1948 est->rx_bd_short_event +
1949 est->rx_bd_packet_too_long +
1950 est->rx_bd_out_of_range +
1951 est->rx_bd_in_range +
1952 est->rx_runt_packet +
1953 est->rx_short_event +
1954 est->rx_packet_too_long +
1955 est->rx_out_of_range +
1958 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1959 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1961 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1962 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1963 est->tx_bd_excessive_collisions +
1964 est->tx_bd_late_collision +
1965 est->tx_bd_multple_collisions);
1966 spin_unlock_irqrestore(&dev->lock, flags);
/* MAL callback ops for non-SG EMACs (single-descriptor RX peek). */
1970 static struct mal_commac_ops emac_commac_ops = {
1971 .poll_tx = &emac_poll_tx,
1972 .poll_rx = &emac_poll_rx,
1973 .peek_rx = &emac_peek_rx,
/* MAL callback ops for SG-capable EMACs — only peek_rx differs. */
1977 static struct mal_commac_ops emac_commac_sg_ops = {
1978 .poll_tx = &emac_poll_tx,
1979 .poll_rx = &emac_poll_rx,
1980 .peek_rx = &emac_peek_rx_sg,
/*
 * ethtool get_settings: report PHY capabilities and, under link_lock,
 * a consistent snapshot of the negotiated advertising/autoneg/speed/
 * duplex state.  Transceiver is external iff a PHY address is set.
 */
1984 /* Ethtool support */
1985 static int emac_ethtool_get_settings(struct net_device *ndev,
1986 struct ethtool_cmd *cmd)
1988 struct emac_instance *dev = netdev_priv(ndev);
1990 cmd->supported = dev->phy.features;
1991 cmd->port = PORT_MII;
1992 cmd->phy_address = dev->phy.address;
1994 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1996 mutex_lock(&dev->link_lock);
1997 cmd->advertising = dev->phy.advertising;
1998 cmd->autoneg = dev->phy.autoneg;
1999 cmd->speed = dev->phy.speed;
2000 cmd->duplex = dev->phy.duplex;
2001 mutex_unlock(&dev->link_lock);
/*
 * ethtool set_settings: validate the request against the PHY's feature
 * mask, then either force speed/duplex (autoneg off) or restart
 * autonegotiation with the requested advertisement (keeping the current
 * pause bits), both under link_lock.  Finishes by forcing a link
 * re-evaluation.  The per-speed case labels and error returns are
 * partly elided in this excerpt.
 */
2006 static int emac_ethtool_set_settings(struct net_device *ndev,
2007 struct ethtool_cmd *cmd)
2009 struct emac_instance *dev = netdev_priv(ndev);
2010 u32 f = dev->phy.features;
2012 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2013 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
2015 /* Basic sanity checks */
2016 if (dev->phy.address < 0)
2018 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
2020 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
2022 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
2025 if (cmd->autoneg == AUTONEG_DISABLE) {
2026 switch (cmd->speed) {
2028 if (cmd->duplex == DUPLEX_HALF &&
2029 !(f & SUPPORTED_10baseT_Half))
2031 if (cmd->duplex == DUPLEX_FULL &&
2032 !(f & SUPPORTED_10baseT_Full))
2036 if (cmd->duplex == DUPLEX_HALF &&
2037 !(f & SUPPORTED_100baseT_Half))
2039 if (cmd->duplex == DUPLEX_FULL &&
2040 !(f & SUPPORTED_100baseT_Full))
2044 if (cmd->duplex == DUPLEX_HALF &&
2045 !(f & SUPPORTED_1000baseT_Half))
2047 if (cmd->duplex == DUPLEX_FULL &&
2048 !(f & SUPPORTED_1000baseT_Full))
2055 mutex_lock(&dev->link_lock);
2056 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2058 mutex_unlock(&dev->link_lock);
2061 if (!(f & SUPPORTED_Autoneg))
2064 mutex_lock(&dev->link_lock);
2065 dev->phy.def->ops->setup_aneg(&dev->phy,
2066 (cmd->advertising & f) |
2067 (dev->phy.advertising &
2069 ADVERTISED_Asym_Pause)));
2070 mutex_unlock(&dev->link_lock);
2072 emac_force_link_update(dev);
/* ethtool get_ringparam: ring sizes are fixed at compile time. */
2077 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2078 struct ethtool_ringparam *rp)
2080 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2081 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
/*
 * ethtool get_pauseparam: report autoneg-pause capability and — for
 * full-duplex links — the symmetric/asymmetric pause state, under
 * link_lock.  The autoneg assignment and asym branch body are elided
 * in this excerpt.
 */
2084 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2085 struct ethtool_pauseparam *pp)
2087 struct emac_instance *dev = netdev_priv(ndev);
2089 mutex_lock(&dev->link_lock);
2090 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2091 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2094 if (dev->phy.duplex == DUPLEX_FULL) {
2096 pp->rx_pause = pp->tx_pause = 1;
2097 else if (dev->phy.asym_pause)
2100 mutex_unlock(&dev->link_lock);
/* Size of this EMAC's register dump: subheader + register block. */
2103 static int emac_get_regs_len(struct emac_instance *dev)
2105 return sizeof(struct emac_ethtool_regs_subhdr) +
2106 sizeof(struct emac_regs);
/*
 * ethtool get_regs_len: total dump size = header + EMAC regs + MAL
 * regs, plus ZMII/RGMII/TAH blocks for whichever companion cells this
 * instance has.
 */
2109 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2111 struct emac_instance *dev = netdev_priv(ndev);
2114 size = sizeof(struct emac_ethtool_regs_hdr) +
2115 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2116 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2117 size += zmii_get_regs_len(dev->zmii_dev);
2118 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2119 size += rgmii_get_regs_len(dev->rgmii_dev);
2120 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2121 size += tah_get_regs_len(dev->tah_dev);
/*
 * Write this EMAC's register-dump section into @buf: a subheader
 * carrying the cell index and a version matching the silicon variant
 * (EMAC4SYNC / EMAC4 / classic), followed by a raw MMIO copy of the
 * register block.  Returns the advanced buffer pointer.
 */
2126 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2128 struct emac_ethtool_regs_subhdr *hdr = buf;
2130 hdr->index = dev->cell_index;
2131 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2132 hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2133 } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2134 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2136 hdr->version = EMAC_ETHTOOL_REGS_VER;
2138 memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2139 return (void *)(hdr + 1) + sizeof(struct emac_regs);
/*
 * ethtool get_regs: assemble the full dump — header with component
 * flags, MAL registers, EMAC registers, then ZMII/RGMII/TAH sections
 * for the companion cells present on this instance.
 */
2142 static void emac_ethtool_get_regs(struct net_device *ndev,
2143 struct ethtool_regs *regs, void *buf)
2145 struct emac_instance *dev = netdev_priv(ndev);
2146 struct emac_ethtool_regs_hdr *hdr = buf;
2148 hdr->components = 0;
2151 buf = mal_dump_regs(dev->mal, buf);
2152 buf = emac_dump_regs(dev, buf);
2153 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2154 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2155 buf = zmii_dump_regs(dev->zmii_dev, buf);
2157 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2158 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2159 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2161 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2162 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2163 buf = tah_dump_regs(dev->tah_dev, buf);
/*
 * ethtool nway_reset: restart autonegotiation with the current
 * advertisement.  Fails when there is no PHY; requires autoneg to be
 * enabled (the early-exit branch is elided in this excerpt).  Forces a
 * link re-evaluation afterwards.
 */
2167 static int emac_ethtool_nway_reset(struct net_device *ndev)
2169 struct emac_instance *dev = netdev_priv(ndev);
2172 DBG(dev, "nway_reset" NL);
2174 if (dev->phy.address < 0)
2177 mutex_lock(&dev->link_lock);
2178 if (!dev->phy.autoneg) {
2183 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2185 mutex_unlock(&dev->link_lock);
2186 emac_force_link_update(dev);
/* ethtool get_sset_count: only the stats string set is supported. */
2190 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2192 if (stringset == ETH_SS_STATS)
2193 return EMAC_ETHTOOL_STATS_COUNT;
/* ethtool get_strings: copy out the static stats key names. */
2198 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2201 if (stringset == ETH_SS_STATS)
2202 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
/*
 * ethtool get_ethtool_stats: copy dev->stats followed by dev->estats
 * as a flat u64 array, matching emac_stats_keys order.
 */
2205 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2206 struct ethtool_stats *estats,
2209 struct emac_instance *dev = netdev_priv(ndev);
2211 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2212 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2213 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
/*
 * ethtool get_drvinfo: report driver name/version and a bus-info string
 * built from the cell index and the device-tree node path.
 */
2216 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2217 struct ethtool_drvinfo *info)
2219 struct emac_instance *dev = netdev_priv(ndev);
2221 strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2222 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2223 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
2224 dev->cell_index, dev->ofdev->dev.of_node->full_name);
/* ethtool operations table wiring up the handlers above. */
2227 static const struct ethtool_ops emac_ethtool_ops = {
2228 .get_settings = emac_ethtool_get_settings,
2229 .set_settings = emac_ethtool_set_settings,
2230 .get_drvinfo = emac_ethtool_get_drvinfo,
2232 .get_regs_len = emac_ethtool_get_regs_len,
2233 .get_regs = emac_ethtool_get_regs,
2235 .nway_reset = emac_ethtool_nway_reset,
2237 .get_ringparam = emac_ethtool_get_ringparam,
2238 .get_pauseparam = emac_ethtool_get_pauseparam,
2240 .get_strings = emac_ethtool_get_strings,
2241 .get_sset_count = emac_ethtool_get_sset_count,
2242 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2244 .get_link = ethtool_op_get_link,
/*
 * ndo_do_ioctl: MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) via
 * emac_mdio_read()/emac_mdio_write(); rejected when no PHY is present.
 * The switch statement and case labels are elided in this excerpt.
 */
2247 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2249 struct emac_instance *dev = netdev_priv(ndev);
2250 struct mii_ioctl_data *data = if_mii(rq);
2252 DBG(dev, "ioctl %08x" NL, cmd);
2254 if (dev->phy.address < 0)
2259 data->phy_id = dev->phy.address;
2262 data->val_out = emac_mdio_read(ndev, dev->phy.address,
2267 emac_mdio_write(ndev, dev->phy.address, data->reg_num,
/*
 * One probe-ordering dependency of an EMAC instance (MAL, ZMII, RGMII,
 * TAH, MDIO, or the previous EMAC in the boot list): the DT phandle
 * and, once resolved, its node / platform device / driver data.
 */
2275 struct emac_depentry {
2277 struct device_node *node;
2278 struct platform_device *ofdev;
2282 #define EMAC_DEP_MAL_IDX 0
2283 #define EMAC_DEP_ZMII_IDX 1
2284 #define EMAC_DEP_RGMII_IDX 2
2285 #define EMAC_DEP_TAH_IDX 3
2286 #define EMAC_DEP_MDIO_IDX 4
2287 #define EMAC_DEP_PREV_IDX 5
2288 #define EMAC_DEP_COUNT 6
/*
 * Resolve each dependency one step further (phandle -> node -> ofdev ->
 * drvdata) and count how many are fully probed.  The "previous EMAC"
 * entry is special-cased via the boot list since that dependency can
 * disappear.  Returns true once all EMAC_DEP_COUNT entries are
 * satisfied.  Loop-local counting lines are elided in this excerpt.
 */
2290 static int emac_check_deps(struct emac_instance *dev,
2291 struct emac_depentry *deps)
2294 struct device_node *np;
2296 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2297 /* no dependency on that item, allright */
2298 if (deps[i].phandle == 0) {
2302 /* special case for blist as the dependency might go away */
2303 if (i == EMAC_DEP_PREV_IDX) {
2304 np = *(dev->blist - 1);
2306 deps[i].phandle = 0;
2310 if (deps[i].node == NULL)
2311 deps[i].node = of_node_get(np);
2313 if (deps[i].node == NULL)
2314 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2315 if (deps[i].node == NULL)
2317 if (deps[i].ofdev == NULL)
2318 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2319 if (deps[i].ofdev == NULL)
2321 if (deps[i].drvdata == NULL)
2322 deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
2323 if (deps[i].drvdata != NULL)
2326 return there == EMAC_DEP_COUNT;
/* Drop the references on all dependency platform devices. */
2329 static void emac_put_deps(struct emac_instance *dev)
2331 of_dev_put(dev->mal_dev);
2332 of_dev_put(dev->zmii_dev);
2333 of_dev_put(dev->rgmii_dev);
2334 of_dev_put(dev->mdio_dev);
2335 of_dev_put(dev->tah_dev);
/*
 * Platform-bus notifier: whenever any device gets bound to a driver,
 * wake the probes waiting in emac_wait_deps() so they can re-check
 * their dependencies.
 */
2338 static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2341 /* We are only intereted in device addition */
2342 if (action == BUS_NOTIFY_BOUND_DRIVER)
2343 wake_up_all(&emac_probe_wait);
/* Notifier block registered around the dependency wait. */
2347 static struct notifier_block emac_of_bus_notifier = {
2348 .notifier_call = emac_of_bus_notify
/*
 * Wait (with EMAC_PROBE_DEP_TIMEOUT) for all of this instance's
 * dependency devices to be probed: fill the phandle table from the DT
 * properties, register a bus notifier so newly bound devices re-trigger
 * the check, then hand the resolved platform devices to dev->*_dev.
 * Node references are dropped; ofdev references are kept on success
 * (and the previous-EMAC ref is always released).  The error-path
 * detail between the loop and the assignments is elided in this
 * excerpt.
 */
2351 static int emac_wait_deps(struct emac_instance *dev)
2353 struct emac_depentry deps[EMAC_DEP_COUNT];
2356 memset(&deps, 0, sizeof(deps));
2358 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2359 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2360 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2362 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2364 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2365 if (dev->blist && dev->blist > emac_boot_list)
2366 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2367 bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2368 wait_event_timeout(emac_probe_wait,
2369 emac_check_deps(dev, deps),
2370 EMAC_PROBE_DEP_TIMEOUT);
2371 bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2372 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2373 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2374 of_node_put(deps[i].node);
2376 of_dev_put(deps[i].ofdev);
2379 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2380 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2381 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2382 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2383 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2385 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
/*
 * Read a u32 device-tree property into *val.  Missing/short properties
 * log an error only when @fatal; the value assignment and returns are
 * elided in this excerpt.
 */
2389 static int emac_read_uint_prop(struct device_node *np, const char *name,
2390 u32 *val, int fatal)
2393 const u32 *prop = of_get_property(np, name, &len);
2394 if (prop == NULL || len < sizeof(u32)) {
2396 printk(KERN_ERR "%s: missing %s property\n",
2397 np->full_name, name);
/*
 * Discover and initialize the PHY for this EMAC instance.
 *
 * PHY-less case (no phy-address and no phy-map in the DT): fake a PHY
 * at address -1 with capabilities derived from the phy mode.  Otherwise
 * (under the global emac_phy_map_lock): hook up the MDIO accessors,
 * apply the 440GX internal-clock workaround where needed, configure the
 * EMAC so MDIO works, assign the GPCS address for GPCS modes, then scan
 * the 32 MDIO addresses not excluded by phy_map/busy_phy_map — probing
 * BMCR first as a quick liveness check — and claim the first address
 * that answers.  After probing: run the PHY's init op, strip
 * platform-excluded features, and either restart autonegotiation
 * (advertising pause unless the 40x no-flow-control quirk applies) or
 * force the highest supported speed/duplex.
 * NOTE(review): excerpt — several branch bodies, speed constants and
 * return statements are elided from this view.
 */
2404 static int emac_init_phy(struct emac_instance *dev)
2406 struct device_node *np = dev->ofdev->dev.of_node;
2407 struct net_device *ndev = dev->ndev;
2411 dev->phy.dev = ndev;
2412 dev->phy.mode = dev->phy_mode;
2414 /* PHY-less configuration.
2415 * XXX I probably should move these settings to the dev tree
2417 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2420 /* PHY-less configuration.
2421 * XXX I probably should move these settings to the dev tree
2423 dev->phy.address = -1;
2424 dev->phy.features = SUPPORTED_MII;
2425 if (emac_phy_supports_gige(dev->phy_mode))
2426 dev->phy.features |= SUPPORTED_1000baseT_Full;
2428 dev->phy.features |= SUPPORTED_100baseT_Full;
2434 mutex_lock(&emac_phy_map_lock);
2435 phy_map = dev->phy_map | busy_phy_map;
2437 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2439 dev->phy.mdio_read = emac_mdio_read;
2440 dev->phy.mdio_write = emac_mdio_write;
2442 /* Enable internal clock source */
2443 #ifdef CONFIG_PPC_DCR_NATIVE
2444 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2445 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2447 /* PHY clock workaround */
2448 emac_rx_clk_tx(dev);
2450 /* Enable internal clock source on 440GX*/
2451 #ifdef CONFIG_PPC_DCR_NATIVE
2452 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2453 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2455 /* Configure EMAC with defaults so we can at least use MDIO
2456 * This is needed mostly for 440GX
2458 if (emac_phy_gpcs(dev->phy.mode)) {
2460 * Make GPCS PHY address equal to EMAC index.
2461 * We probably should take into account busy_phy_map
2462 * and/or phy_map here.
2464 * Note that the busy_phy_map is currently global
2465 * while it should probably be per-ASIC...
2467 dev->phy.gpcs_address = dev->gpcs_address;
2468 if (dev->phy.gpcs_address == 0xffffffff)
2469 dev->phy.address = dev->cell_index;
2472 emac_configure(dev);
2474 if (dev->phy_address != 0xffffffff)
2475 phy_map = ~(1 << dev->phy_address);
2477 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2478 if (!(phy_map & 1)) {
2480 busy_phy_map |= 1 << i;
2482 /* Quick check if there is a PHY at the address */
2483 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2484 if (r == 0xffff || r < 0)
2486 if (!emac_mii_phy_probe(&dev->phy, i))
2490 /* Enable external clock source */
2491 #ifdef CONFIG_PPC_DCR_NATIVE
2492 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2493 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2495 mutex_unlock(&emac_phy_map_lock);
2497 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2502 if (dev->phy.def->ops->init)
2503 dev->phy.def->ops->init(&dev->phy);
2505 /* Disable any PHY features not supported by the platform */
2506 dev->phy.def->features &= ~dev->phy_feat_exc;
2507 dev->phy.features &= ~dev->phy_feat_exc;
2509 /* Setup initial link parameters */
2510 if (dev->phy.features & SUPPORTED_Autoneg) {
2511 adv = dev->phy.features;
2512 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2513 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2514 /* Restart autonegotiation */
2515 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2517 u32 f = dev->phy.def->features;
2518 int speed = SPEED_10, fd = DUPLEX_HALF;
2520 /* Select highest supported speed/duplex */
2521 if (f & SUPPORTED_1000baseT_Full) {
2524 } else if (f & SUPPORTED_1000baseT_Half)
2526 else if (f & SUPPORTED_100baseT_Full) {
2529 } else if (f & SUPPORTED_100baseT_Half)
2531 else if (f & SUPPORTED_10baseT_Full)
2534 /* Force link parameters */
2535 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
/*
 * emac_init_config - populate an emac_instance from its device-tree node.
 *
 * Reads MAL linkage, FIFO sizes, PHY addressing, cell index and OPB bus
 * frequency from the OF node, derives chip feature bits from "compatible"
 * strings, and copies the MAC address into the net_device.  Properties read
 * with a last argument of 1 are mandatory (the error-return paths are
 * elided in this view); properties read with 0 fall back to the defaults
 * assigned just below each call.  Returns 0 on success.
 */
2540 static int emac_init_config(struct emac_instance *dev)
2542 struct device_node *np = dev->ofdev->dev.of_node;
2545 /* Read config from device-tree */
2546 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2548 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2550 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2552 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2554 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2555 dev->max_mtu = 1500;
2556 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2557 dev->rx_fifo_size = 2048;
2558 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2559 dev->tx_fifo_size = 2048;
/* Gigabit FIFO sizes default to the 10/100 values when not specified */
2560 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2561 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2562 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2563 dev->tx_fifo_size_gige = dev->tx_fifo_size;
/* 0xffffffff is the "not present / not wired" sentinel for these */
2564 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2565 dev->phy_address = 0xffffffff;
2566 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2567 dev->phy_map = 0xffffffff;
2568 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2569 dev->gpcs_address = 0xffffffff;
/* OPB frequency comes from the parent bus node and is mandatory */
2570 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2572 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2574 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2576 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2578 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2580 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2581 dev->zmii_port = 0xffffffff;
2582 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2584 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2585 dev->rgmii_port = 0xffffffff;
2586 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2587 dev->fifo_entry_size = 16;
2588 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2589 dev->mal_burst_size = 256;
2591 /* PHY mode needs some decoding */
2592 dev->phy_mode = of_get_phy_mode(np);
2593 if (dev->phy_mode < 0)
2594 dev->phy_mode = PHY_MODE_NA;
/*
 * Check EMAC version: "ibm,emac4sync" implies "ibm,emac4" features plus
 * the sync register layout; per-SoC compatibles add clock/duplex quirks.
 */
2596 /* Check EMAC version */
2597 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2598 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2599 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2600 of_device_is_compatible(np, "ibm,emac-460gt"))
2601 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2602 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2603 of_device_is_compatible(np, "ibm,emac-405exr"))
2604 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2605 if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2606 dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2607 EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2608 EMAC_FTR_460EX_PHY_CLK_FIX);
2610 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2611 dev->features |= EMAC_FTR_EMAC4;
2612 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2613 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2615 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2616 of_device_is_compatible(np, "ibm,emac-440gr"))
2617 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
/* 405EZ has no flow-control support; requires the Kconfig option */
2618 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2619 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2620 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2622 printk(KERN_ERR "%s: Flow control not disabled!\n",
2630 /* Fixup some feature bits based on the device tree */
2631 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2632 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2633 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2634 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2636 /* CAB lacks the appropriate properties */
2637 if (of_device_is_compatible(np, "ibm,emac-axon"))
2638 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2639 EMAC_FTR_STACR_OC_INVERT;
/*
 * Enable TAH/ZMII/RGMII features as found.  A non-zero phandle with the
 * matching Kconfig option disabled is a configuration error (the printk
 * branches below report it).
 */
2641 /* Enable TAH/ZMII/RGMII features as found */
2642 if (dev->tah_ph != 0) {
2643 #ifdef CONFIG_IBM_EMAC_TAH
2644 dev->features |= EMAC_FTR_HAS_TAH;
2646 printk(KERN_ERR "%s: TAH support not enabled !\n",
2652 if (dev->zmii_ph != 0) {
2653 #ifdef CONFIG_IBM_EMAC_ZMII
2654 dev->features |= EMAC_FTR_HAS_ZMII;
2656 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2662 if (dev->rgmii_ph != 0) {
2663 #ifdef CONFIG_IBM_EMAC_RGMII
2664 dev->features |= EMAC_FTR_HAS_RGMII;
2666 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2672 /* Read MAC-address */
2673 p = of_get_property(np, "local-mac-address", NULL);
2675 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2679 memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* IAHT/GAHT hash-table geometry differs between EMAC4 and EMAC4SYNC */
2681 /* IAHT and GAHT filter parameterization */
2682 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2683 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2684 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2686 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2687 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2690 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2691 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2692 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2693 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2694 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
/*
 * net_device callbacks for non-gigabit EMACs: plain (non-SG) transmit
 * path and the generic eth_change_mtu.  Compare emac_gige_netdev_ops.
 */
2699 static const struct net_device_ops emac_netdev_ops = {
2700 .ndo_open = emac_open,
2701 .ndo_stop = emac_close,
2702 .ndo_get_stats = emac_stats,
2703 .ndo_set_rx_mode = emac_set_multicast_list,
2704 .ndo_do_ioctl = emac_ioctl,
2705 .ndo_tx_timeout = emac_tx_timeout,
2706 .ndo_validate_addr = eth_validate_addr,
2707 .ndo_set_mac_address = eth_mac_addr,
2708 .ndo_start_xmit = emac_start_xmit,
2709 .ndo_change_mtu = eth_change_mtu,
/*
 * net_device callbacks for gigabit-capable EMACs: scatter/gather-aware
 * transmit (emac_start_xmit_sg) and driver-specific MTU handling
 * (emac_change_mtu).  Selected in emac_probe when the PHY mode
 * supports gigabit.
 */
2712 static const struct net_device_ops emac_gige_netdev_ops = {
2713 .ndo_open = emac_open,
2714 .ndo_stop = emac_close,
2715 .ndo_get_stats = emac_stats,
2716 .ndo_set_rx_mode = emac_set_multicast_list,
2717 .ndo_do_ioctl = emac_ioctl,
2718 .ndo_tx_timeout = emac_tx_timeout,
2719 .ndo_validate_addr = eth_validate_addr,
2720 .ndo_set_mac_address = eth_mac_addr,
2721 .ndo_start_xmit = emac_start_xmit_sg,
2722 .ndo_change_mtu = emac_change_mtu,
/*
 * emac_probe - bind one EMAC device-tree node to the driver.
 *
 * Allocates the net_device and private instance, parses the device tree
 * (emac_init_config), maps IRQs and registers, waits for dependent
 * devices (MAL, and optionally MDIO/ZMII/RGMII/TAH), registers with the
 * MAL, attaches the bridges, probes the PHY, and finally registers the
 * net device.  Failures unwind in reverse order via the goto labels near
 * the bottom.  Nodes on the boot list are removed and waiters woken on
 * failure so they do not block forever.
 */
2725 static int emac_probe(struct platform_device *ofdev)
2727 struct net_device *ndev;
2728 struct emac_instance *dev;
2729 struct device_node *np = ofdev->dev.of_node;
2730 struct device_node **blist = NULL;
2733 /* Skip unused/unwired EMACS. We leave the check for an unused
2734 * property here for now, but new flat device trees should set a
2735 * status property to "disabled" instead.
2737 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2740 /* Find ourselves in the bootlist if we are there */
2741 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2742 if (emac_boot_list[i] == np)
2743 blist = &emac_boot_list[i];
2745 /* Allocate our net_device structure */
2747 ndev = alloc_etherdev(sizeof(struct emac_instance));
2751 dev = netdev_priv(ndev);
2755 SET_NETDEV_DEV(ndev, &ofdev->dev);
2757 /* Initialize some embedded data structures */
2758 mutex_init(&dev->mdio_lock);
2759 mutex_init(&dev->link_lock);
2760 spin_lock_init(&dev->lock);
2761 INIT_WORK(&dev->reset_work, emac_reset_work);
2763 /* Init various config data based on device-tree */
2764 err = emac_init_config(dev);
2768 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2769 dev->emac_irq = irq_of_parse_and_map(np, 0);
2770 dev->wol_irq = irq_of_parse_and_map(np, 1);
2771 if (dev->emac_irq == NO_IRQ) {
2772 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2775 ndev->irq = dev->emac_irq;
/* Map the register block described by the node's first "reg" entry */
2778 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2779 printk(KERN_ERR "%s: Can't get registers address\n",
2783 // TODO : request_mem_region
2784 dev->emacp = ioremap(dev->rsrc_regs.start,
2785 resource_size(&dev->rsrc_regs));
2786 if (dev->emacp == NULL) {
2787 printk(KERN_ERR "%s: Can't map device registers!\n",
2793 /* Wait for dependent devices */
2794 err = emac_wait_deps(dev);
2797 "%s: Timeout waiting for dependent devices\n",
2799 /* display more info about what's missing ? */
2802 dev->mal = platform_get_drvdata(dev->mal_dev);
2803 if (dev->mdio_dev != NULL)
2804 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
2806 /* Register with MAL */
2807 dev->commac.ops = &emac_commac_ops;
2808 dev->commac.dev = dev;
2809 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2810 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2811 err = mal_register_commac(dev->mal, &dev->commac);
2813 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2814 np->full_name, dev->mal_dev->dev.of_node->full_name);
2817 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2818 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2820 /* Get pointers to BD rings */
2822 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2824 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2826 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2827 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
/* Clear the descriptor rings and the skb bookkeeping arrays */
2830 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2831 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2832 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2833 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2835 /* Attach to ZMII, if needed */
2836 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2837 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2838 goto err_unreg_commac;
2840 /* Attach to RGMII, if needed */
2841 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2842 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2843 goto err_detach_zmii;
2845 /* Attach to TAH, if needed */
2846 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2847 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2848 goto err_detach_rgmii;
2850 /* Set some link defaults before we can find out real parameters */
2851 dev->phy.speed = SPEED_100;
2852 dev->phy.duplex = DUPLEX_FULL;
2853 dev->phy.autoneg = AUTONEG_DISABLE;
2854 dev->phy.pause = dev->phy.asym_pause = 0;
2855 dev->stop_timeout = STOP_TIMEOUT_100;
2856 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2858 /* Some SoCs like APM821xx does not support Half Duplex mode. */
2859 if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
2860 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
2861 SUPPORTED_100baseT_Half |
2862 SUPPORTED_10baseT_Half);
2865 /* Find PHY if any */
2866 err = emac_init_phy(dev);
2868 goto err_detach_tah;
/* Advertise checksum offload and SG; pick ops by PHY-mode capability */
2871 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2872 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2874 ndev->watchdog_timeo = 5 * HZ;
2875 if (emac_phy_supports_gige(dev->phy_mode)) {
2876 ndev->netdev_ops = &emac_gige_netdev_ops;
2877 dev->commac.ops = &emac_commac_sg_ops;
2879 ndev->netdev_ops = &emac_netdev_ops;
2880 ndev->ethtool_ops = &emac_ethtool_ops;
2882 netif_carrier_off(ndev);
2884 err = register_netdev(ndev);
2886 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2887 np->full_name, err);
2888 goto err_detach_tah;
2891 /* Set our drvdata last as we don't want them visible until we are
2895 platform_set_drvdata(ofdev, dev);
2897 /* There's a new kid in town ! Let's tell everybody */
2898 wake_up_all(&emac_probe_wait);
2901 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2902 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2904 if (dev->phy_mode == PHY_MODE_SGMII)
2905 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2907 if (dev->phy.address >= 0)
2908 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2909 dev->phy.def->name, dev->phy.address);
2911 emac_dbg_register(dev);
/*
 * Error unwind: release resources in strict reverse order of
 * acquisition (TAH -> RGMII -> ZMII -> commac -> iomap -> IRQs).
 */
2916 /* I have a bad feeling about this ... */
2919 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2920 tah_detach(dev->tah_dev, dev->tah_port);
2922 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2923 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2925 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2926 zmii_detach(dev->zmii_dev, dev->zmii_port);
2928 mal_unregister_commac(dev->mal, &dev->commac);
2932 iounmap(dev->emacp);
2934 if (dev->wol_irq != NO_IRQ)
2935 irq_dispose_mapping(dev->wol_irq);
2936 if (dev->emac_irq != NO_IRQ)
2937 irq_dispose_mapping(dev->emac_irq);
2941 /* if we were on the bootlist, remove us as we won't show up and
2942 * wake up all waiters to notify them in case they were waiting
2947 wake_up_all(&emac_probe_wait);
/*
 * emac_remove - tear down an EMAC instance on driver unbind.
 *
 * Mirrors emac_probe: unregisters the net device, cancels the pending
 * reset work, detaches TAH/RGMII/ZMII bridges, releases the PHY address
 * from the global busy map, unregisters from the MAL, unmaps registers,
 * disposes IRQ mappings and frees the net_device.
 */
2952 static int emac_remove(struct platform_device *ofdev)
2954 struct emac_instance *dev = platform_get_drvdata(ofdev);
2956 DBG(dev, "remove" NL);
2958 unregister_netdev(dev->ndev);
/* Make sure no reset work is still queued before freeing anything */
2960 cancel_work_sync(&dev->reset_work);
2962 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2963 tah_detach(dev->tah_dev, dev->tah_port);
2964 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2965 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2966 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2967 zmii_detach(dev->zmii_dev, dev->zmii_port);
/* Release our PHY address so a future probe can claim it */
2969 busy_phy_map &= ~(1 << dev->phy.address);
2970 DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
2972 mal_unregister_commac(dev->mal, &dev->commac);
2975 emac_dbg_unregister(dev);
2976 iounmap(dev->emacp);
2978 if (dev->wol_irq != NO_IRQ)
2979 irq_dispose_mapping(dev->wol_irq);
2980 if (dev->emac_irq != NO_IRQ)
2981 irq_dispose_mapping(dev->emac_irq);
2983 free_netdev(dev->ndev);
2988 /* XXX Features in here should be replaced by properties... */
/* Device-tree "compatible" strings this driver binds to */
2989 static const struct of_device_id emac_match[] =
2993 .compatible = "ibm,emac",
2997 .compatible = "ibm,emac4",
3001 .compatible = "ibm,emac4sync",
3005 MODULE_DEVICE_TABLE(of, emac_match);
/* Platform driver glue: matches emac_match nodes to probe/remove above */
3007 static struct platform_driver emac_driver = {
3010 .of_match_table = emac_match,
3012 .probe = emac_probe,
3013 .remove = emac_remove,
/*
 * emac_make_bootlist - collect all EMAC nodes and order them by cell-index.
 *
 * Walks every device-tree node, keeps those matching emac_match (skipping
 * nodes with an "unused" property), records up to EMAC_BOOT_LIST_SIZE of
 * them with a reference held (of_node_get), then sorts the list by
 * cell-index so probe ordering is deterministic.
 */
3016 static void __init emac_make_bootlist(void)
3018 struct device_node *np = NULL;
3020 int cell_indices[EMAC_BOOT_LIST_SIZE];
3023 while((np = of_find_all_nodes(np)) != NULL) {
3026 if (of_match_node(emac_match, np) == NULL)
3028 if (of_get_property(np, "unused", NULL))
3030 idx = of_get_property(np, "cell-index", NULL);
3033 cell_indices[i] = *idx;
3034 emac_boot_list[i++] = of_node_get(np);
3035 if (i >= EMAC_BOOT_LIST_SIZE) {
3042 /* Bubble sort them (doh, what a creative algorithm :-) */
3043 for (i = 0; max > 1 && (i < (max - 1)); i++)
3044 for (j = i; j < max; j++) {
3045 if (cell_indices[i] > cell_indices[j]) {
3046 swap(emac_boot_list[i], emac_boot_list[j]);
3047 swap(cell_indices[i], cell_indices[j]);
/*
 * emac_init - module entry point.
 * Prints the banner, builds the boot list, initializes submodules
 * (elided in this view) and registers the platform driver.
 */
3052 static int __init emac_init(void)
3056 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3058 /* Init debug stuff */
3061 /* Build EMAC boot list */
3062 emac_make_bootlist();
3064 /* Init submodules */
3077 rc = platform_driver_register(&emac_driver);
/*
 * emac_exit - module exit point.
 * Unregisters the platform driver and drops the device-node references
 * taken by emac_make_bootlist().
 */
3095 static void __exit emac_exit(void)
3099 platform_driver_unregister(&emac_driver);
3107 /* Destroy EMAC boot list */
3108 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3109 of_node_put(emac_boot_list[i]);
/* Module load/unload hooks */
3112 module_init(emac_init);
3113 module_exit(emac_exit);