GNU Linux-libre 4.4.285-gnu1
[releases.git] / drivers / net / ethernet / ibm / emac / core.c
1 /*
2  * drivers/net/ethernet/ibm/emac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7  *                <benh@kernel.crashing.org>
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13  *
14  * Based on original work by
15  *      Matt Porter <mporter@kernel.crashing.org>
16  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17  *      Armin Kuster <akuster@mvista.com>
18  *      Johnnie Peters <jpeters@mvista.com>
19  *
20  * This program is free software; you can redistribute  it and/or modify it
21  * under  the terms of  the GNU General  Public License as published by the
22  * Free Software Foundation;  either version 2 of the  License, or (at your
23  * option) any later version.
24  *
25  */
26
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
41 #include <linux/of.h>
42 #include <linux/of_address.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_net.h>
45 #include <linux/slab.h>
46
47 #include <asm/processor.h>
48 #include <asm/io.h>
49 #include <asm/dma.h>
50 #include <asm/uaccess.h>
51 #include <asm/dcr.h>
52 #include <asm/dcr-regs.h>
53
54 #include "core.h"
55
56 /*
57  * Lack of dma_unmap_???? calls is intentional.
58  *
59  * API-correct usage requires additional support state information to be
60  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
61  * EMAC design (e.g. TX buffer passed from network stack can be split into
62  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
63  * maintaining such information will add additional overhead.
64  * Current DMA API implementation for 4xx processors only ensures cache coherency
65  * and dma_unmap_???? routines are empty and are likely to stay this way.
66  * I decided to omit dma_unmap_??? calls because I don't want to add additional
67  * complexity just for the sake of following some abstract API, when it doesn't
68  * add any real benefit to the driver. I understand that this decision maybe
69  * controversial, but I really tried to make code API-correct and efficient
70  * at the same time and didn't come up with code I liked :(.                --ebs
71  */
72
73 #define DRV_NAME        "emac"
74 #define DRV_VERSION     "3.54"
75 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
76
77 MODULE_DESCRIPTION(DRV_DESC);
78 MODULE_AUTHOR
79     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
80 MODULE_LICENSE("GPL");
81
82 /* minimum number of free TX descriptors required to wake up TX process */
83 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
84
85 /* If packet size is less than this number, we allocate small skb and copy packet
86  * contents into it instead of just sending original big skb up
87  */
88 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
89
90 /* Since multiple EMACs share MDIO lines in various ways, we need
91  * to avoid re-using the same PHY ID in cases where the arch didn't
92  * setup precise phy_map entries
93  *
94  * XXX This is something that needs to be reworked as we can have multiple
95  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
96  * probably require in that case to have explicit PHY IDs in the device-tree
97  */
98 static u32 busy_phy_map;
99 static DEFINE_MUTEX(emac_phy_map_lock);
100
101 /* This is the wait queue used to wait on any event related to probe, that
102  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
103  */
104 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
105
106 /* Having stable interface names is a doomed idea. However, it would be nice
107  * if we didn't have completely random interface names at boot too :-) It's
108  * just a matter of making everybody's life easier. Since we are doing
109  * threaded probing, it's a bit harder though. The base idea here is that
110  * we make up a list of all emacs in the device-tree before we register the
111  * driver. Every emac will then wait for the previous one in the list to
112  * initialize before itself. We should also keep that list ordered by
113  * cell_index.
114  * That list is only 4 entries long, meaning that additional EMACs don't
115  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
116  */
117
118 #define EMAC_BOOT_LIST_SIZE     4
119 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
120
121 /* How long should I wait for dependent devices ? */
122 #define EMAC_PROBE_DEP_TIMEOUT  (HZ * 5)
123
124 /* I don't want to litter system log with timeout errors
125  * when we have brain-damaged PHY.
126  */
127 static inline void emac_report_timeout_error(struct emac_instance *dev,
128                                              const char *error)
129 {
130         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
131                                   EMAC_FTR_460EX_PHY_CLK_FIX |
132                                   EMAC_FTR_440EP_PHY_CLK_FIX))
133                 DBG(dev, "%s" NL, error);
134         else if (net_ratelimit())
135                 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
136                         error);
137 }
138
139 /* EMAC PHY clock workaround:
140  * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141  * which allows controlling each EMAC clock
142  */
/* 440EP PHY clock workaround: route the EMAC RX clock from the TX clock
 * by setting this EMAC's SDR0_MFR_ECS bit.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
        if (!emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
                return;

        dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
151
/* Undo emac_rx_clk_tx(): restore the default RX clock source by
 * clearing this EMAC's SDR0_MFR_ECS bit.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
        if (!emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
                return;

        dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
160
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON        HZ
163 #define PHY_POLL_LINK_OFF       (HZ / 5)
164
165 /* Graceful stop timeouts in us.
166  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167  */
168 #define STOP_TIMEOUT_10         1230
169 #define STOP_TIMEOUT_100        124
170 #define STOP_TIMEOUT_1000       13
171 #define STOP_TIMEOUT_1000_JUMBO 73
172
/* 01:80:C2:00:00:01 — the IEEE 802.3 MAC-control (PAUSE) group address;
 * subscribed so flow-control frames are accepted.
 */
static unsigned char default_mcast_addr[] = {
        0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
};
176
177 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
/* ethtool -S string table: exactly EMAC_ETHTOOL_STATS_COUNT entries,
 * indexed in the same order the counters are laid out in memory.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};
195
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
199
200 static inline int emac_phy_supports_gige(int phy_mode)
201 {
202         return  phy_mode == PHY_MODE_GMII ||
203                 phy_mode == PHY_MODE_RGMII ||
204                 phy_mode == PHY_MODE_SGMII ||
205                 phy_mode == PHY_MODE_TBI ||
206                 phy_mode == PHY_MODE_RTBI;
207 }
208
209 static inline int emac_phy_gpcs(int phy_mode)
210 {
211         return  phy_mode == PHY_MODE_SGMII ||
212                 phy_mode == PHY_MODE_TBI ||
213                 phy_mode == PHY_MODE_RTBI;
214 }
215
216 static inline void emac_tx_enable(struct emac_instance *dev)
217 {
218         struct emac_regs __iomem *p = dev->emacp;
219         u32 r;
220
221         DBG(dev, "tx_enable" NL);
222
223         r = in_be32(&p->mr0);
224         if (!(r & EMAC_MR0_TXE))
225                 out_be32(&p->mr0, r | EMAC_MR0_TXE);
226 }
227
228 static void emac_tx_disable(struct emac_instance *dev)
229 {
230         struct emac_regs __iomem *p = dev->emacp;
231         u32 r;
232
233         DBG(dev, "tx_disable" NL);
234
235         r = in_be32(&p->mr0);
236         if (r & EMAC_MR0_TXE) {
237                 int n = dev->stop_timeout;
238                 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
239                 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
240                         udelay(1);
241                         --n;
242                 }
243                 if (unlikely(!n))
244                         emac_report_timeout_error(dev, "TX disable timeout");
245         }
246 }
247
/* Enable the RX channel (MR0[RXE]), first waiting out any asynchronous
 * disable (emac_rx_disable_async) that is still draining.  A no-op while
 * the MAL has marked RX as stopped.
 */
static void emac_rx_enable(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r;

        if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
                goto out;

        DBG(dev, "rx_enable" NL);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_RXE)) {
                if (unlikely(!(r & EMAC_MR0_RXI))) {
                        /* Wait if previous async disable is still in progress */
                        int n = dev->stop_timeout;
                        /* NOTE(review): `&` binds tighter than `=`-RHS here,
                         * so r receives the MASKED value (mr0 & RXI).  The
                         * out_be32 below then writes only RXI|RXE on this
                         * path, clearing other MR0 bits — presumably fine
                         * because MR0/MR1 get reprogrammed by
                         * emac_configure(); verify before restructuring.
                         */
                        while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
                                udelay(1);
                                --n;
                        }
                        if (unlikely(!n))
                                emac_report_timeout_error(dev,
                                                          "RX disable timeout");
                }
                out_be32(&p->mr0, r | EMAC_MR0_RXE);
        }
 out:
        ;
}
276
277 static void emac_rx_disable(struct emac_instance *dev)
278 {
279         struct emac_regs __iomem *p = dev->emacp;
280         u32 r;
281
282         DBG(dev, "rx_disable" NL);
283
284         r = in_be32(&p->mr0);
285         if (r & EMAC_MR0_RXE) {
286                 int n = dev->stop_timeout;
287                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
288                 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
289                         udelay(1);
290                         --n;
291                 }
292                 if (unlikely(!n))
293                         emac_report_timeout_error(dev, "RX disable timeout");
294         }
295 }
296
/* Quiesce the net device around a reconfiguration: block multicast list
 * updates (deferred via no_mcast), disable NAPI polling and stop the TX
 * queue.  Paired with emac_netif_start().
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
        /* Take both TX and addr locks so no_mcast flips atomically with
         * respect to set_multicast / xmit paths.
         */
        netif_tx_lock_bh(dev->ndev);
        netif_addr_lock(dev->ndev);
        dev->no_mcast = 1;
        netif_addr_unlock(dev->ndev);
        netif_tx_unlock_bh(dev->ndev);
        dev->ndev->trans_start = jiffies;       /* prevent tx timeout */
        mal_poll_disable(dev->mal, &dev->commac);
        netif_tx_disable(dev->ndev);
}
308
/* Undo emac_netif_stop(): re-allow multicast updates (flushing one that
 * arrived while stopped), wake the TX queue and re-enable NAPI polling.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
        netif_tx_lock_bh(dev->ndev);
        netif_addr_lock(dev->ndev);
        dev->no_mcast = 0;
        /* Apply a multicast list change that was deferred while stopped */
        if (dev->mcast_pending && netif_running(dev->ndev))
                __emac_set_multicast_list(dev);
        netif_addr_unlock(dev->ndev);
        netif_tx_unlock_bh(dev->ndev);

        netif_wake_queue(dev->ndev);

        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (taken from tg3... though the case where that is wrong is
         *  not terribly harmful)
         */
        mal_poll_enable(dev->mal, &dev->commac);
}
328
329 static inline void emac_rx_disable_async(struct emac_instance *dev)
330 {
331         struct emac_regs __iomem *p = dev->emacp;
332         u32 r;
333
334         DBG(dev, "rx_disable_async" NL);
335
336         r = in_be32(&p->mr0);
337         if (r & EMAC_MR0_RXE)
338                 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
339 }
340
/* Soft-reset the EMAC core via MR0[SRST].
 *
 * On 460EX-class parts the reset needs a PHY-provided TX clock; if the
 * first attempt times out, retry once with the SoC's internal clock
 * selected (see the Meraki MR24 / AR8035-A note below).
 *
 * Returns 0 on success, -ETIMEDOUT if SRST never self-clears; the
 * outcome is cached in dev->reset_failed so a follow-up reset skips the
 * RX/TX disable of an already-dead core.
 */
static int emac_reset(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        int n = 20;     /* SRST poll budget (iterations, no delay) */
        bool __maybe_unused try_internal_clock = false;

        DBG(dev, "reset" NL);

        if (!dev->reset_failed) {
                /* 40x erratum suggests stopping RX channel before reset,
                 * we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

#ifdef CONFIG_PPC_DCR_NATIVE
do_retry:
        /*
         * PPC460EX/GT Embedded Processor Advanced User's Manual
         * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
         * Note: The PHY must provide a TX Clk in order to perform a soft reset
         * of the EMAC. If none is present, select the internal clock
         * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
         * After a soft reset, select the external clock.
         *
         * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
         * ethernet cable is not attached. This causes the reset to timeout
         * and the PHY detection code in emac_init_phy() is unable to
         * communicate and detect the AR8035-A PHY. As a result, the emac
         * driver bails out early and the user has no ethernet.
         * In order to stay compatible with existing configurations, the
         * driver will temporarily switch to the internal clock, after
         * the first reset fails.
         */
        if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
                if (try_internal_clock || (dev->phy_address == 0xffffffff &&
                                           dev->phy_map == 0xffffffff)) {
                        /* No PHY: select internal loop clock before reset */
                        dcri_clrset(SDR0, SDR0_ETH_CFG,
                                    0, SDR0_ETH_CFG_ECS << dev->cell_index);
                } else {
                        /* PHY present: select external clock before reset */
                        dcri_clrset(SDR0, SDR0_ETH_CFG,
                                    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
                }
        }
#endif

        /* Trigger the soft reset and poll for SRST to self-clear */
        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;

#ifdef CONFIG_PPC_DCR_NATIVE
        if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
                if (!n && !try_internal_clock) {
                        /* first attempt has timed out. */
                        n = 20;
                        try_internal_clock = true;
                        goto do_retry;
                }

                if (try_internal_clock || (dev->phy_address == 0xffffffff &&
                                           dev->phy_map == 0xffffffff)) {
                        /* No PHY: restore external clock source after reset */
                        dcri_clrset(SDR0, SDR0_ETH_CFG,
                                    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
                }
        }
#endif

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}
421
/* Program the EMAC group-address hash table (GAHT) from the netdev's
 * multicast list: each address is CRC-hashed to a slot, the table is
 * accumulated in a temporary, then all registers are written in one pass.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
        const int regs = EMAC_XAHT_REGS(dev);
        u32 *gaht_base = emac_gaht_base(dev);
        u32 gaht_temp[regs];    /* VLA; bound is the hardware register count */
        struct netdev_hw_addr *ha;
        int i;

        DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

        memset(gaht_temp, 0, sizeof (gaht_temp));

        netdev_for_each_mc_addr(ha, dev->ndev) {
                int slot, reg, mask;
                DBG2(dev, "mc %pM" NL, ha->addr);

                /* CRC of the address selects a slot; split it into a
                 * register index and a bit mask within that register.
                 */
                slot = EMAC_XAHT_CRC_TO_SLOT(dev,
                                             ether_crc(ETH_ALEN, ha->addr));
                reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
                mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

                gaht_temp[reg] |= mask;
        }

        for (i = 0; i < regs; i++)
                out_be32(gaht_base + i, gaht_temp[i]);
}
449
450 static inline u32 emac_iff2rmr(struct net_device *ndev)
451 {
452         struct emac_instance *dev = netdev_priv(ndev);
453         u32 r;
454
455         r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
456
457         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
458             r |= EMAC4_RMR_BASE;
459         else
460             r |= EMAC_RMR_BASE;
461
462         if (ndev->flags & IFF_PROMISC)
463                 r |= EMAC_RMR_PME;
464         else if (ndev->flags & IFF_ALLMULTI ||
465                          (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
466                 r |= EMAC_RMR_PMME;
467         else if (!netdev_mc_empty(ndev))
468                 r |= EMAC_RMR_MAE;
469
470         if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
471                 r &= ~EMAC4_RMR_MJS_MASK;
472                 r |= EMAC4_RMR_MJS(ndev->mtu);
473         }
474
475         return r;
476 }
477
478 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
479 {
480         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
481
482         DBG2(dev, "__emac_calc_base_mr1" NL);
483
484         switch(tx_size) {
485         case 2048:
486                 ret |= EMAC_MR1_TFS_2K;
487                 break;
488         default:
489                 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
490                        dev->ndev->name, tx_size);
491         }
492
493         switch(rx_size) {
494         case 16384:
495                 ret |= EMAC_MR1_RFS_16K;
496                 break;
497         case 4096:
498                 ret |= EMAC_MR1_RFS_4K;
499                 break;
500         default:
501                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
502                        dev->ndev->name, rx_size);
503         }
504
505         return ret;
506 }
507
508 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
509 {
510         u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
511                 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
512
513         DBG2(dev, "__emac4_calc_base_mr1" NL);
514
515         switch(tx_size) {
516         case 16384:
517                 ret |= EMAC4_MR1_TFS_16K;
518                 break;
519         case 4096:
520                 ret |= EMAC4_MR1_TFS_4K;
521                 break;
522         case 2048:
523                 ret |= EMAC4_MR1_TFS_2K;
524                 break;
525         default:
526                 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
527                        dev->ndev->name, tx_size);
528         }
529
530         switch(rx_size) {
531         case 16384:
532                 ret |= EMAC4_MR1_RFS_16K;
533                 break;
534         case 4096:
535                 ret |= EMAC4_MR1_RFS_4K;
536                 break;
537         case 2048:
538                 ret |= EMAC4_MR1_RFS_2K;
539                 break;
540         default:
541                 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
542                        dev->ndev->name, rx_size);
543         }
544
545         return ret;
546 }
547
548 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
549 {
550         return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
551                 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
552                 __emac_calc_base_mr1(dev, tx_size, rx_size);
553 }
554
555 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
556 {
557         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
558                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
559         else
560                 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
561 }
562
563 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
564                                  unsigned int low, unsigned int high)
565 {
566         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
567                 return (low << 22) | ( (high & 0x3ff) << 6);
568         else
569                 return (low << 23) | ( (high & 0x1ff) << 7);
570 }
571
/* Program the entire EMAC for the current link state: chip reset (or
 * internal loopback when there is no carrier), MR1 speed/duplex/FIFO
 * bits, MAC address, receive mode, FIFO thresholds, PAUSE water marks
 * and interrupt sources.
 *
 * Returns 0 on success or -ETIMEDOUT if the chip reset times out.
 */
static int emac_configure(struct emac_instance *dev)
{
        struct emac_regs __iomem *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
        u32 r, mr1 = 0;

        DBG(dev, "configure" NL);

        if (!link) {
                /* No carrier: park the MAC in full-duplex internal
                 * loopback instead of resetting it.
                 */
                out_be32(&p->mr1, in_be32(&p->mr1)
                         | EMAC_MR1_FDE | EMAC_MR1_ILE);
                udelay(100);
        } else if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
                tah_reset(dev->tah_dev);

        DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
            link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

        /* Default fifo sizes */
        tx_size = dev->tx_fifo_size;
        rx_size = dev->rx_fifo_size;

        /* No link, force loopback */
        if (!link)
                mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

        /* Check for full duplex */
        else if (dev->phy.duplex == DUPLEX_FULL)
                mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

        /* Adjust fifo sizes, mr1 and timeouts based on link speed */
        dev->stop_timeout = STOP_TIMEOUT_10;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
                                (dev->phy.gpcs_address != 0xffffffff) ?
                                 dev->phy.gpcs_address : dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
                } else
                        mr1 |= EMAC_MR1_MF_1000;

                /* Extended fifo sizes */
                tx_size = dev->tx_fifo_size_gige;
                rx_size = dev->rx_fifo_size_gige;

                if (dev->ndev->mtu > ETH_DATA_LEN) {
                        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                                mr1 |= EMAC4_MR1_JPSM;
                        else
                                mr1 |= EMAC_MR1_JPSM;
                        dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
                } else
                        dev->stop_timeout = STOP_TIMEOUT_1000;
                break;
        case SPEED_100:
                mr1 |= EMAC_MR1_MF_100;
                dev->stop_timeout = STOP_TIMEOUT_100;
                break;
        default: /* make gcc happy */
                break;
        }

        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
                                dev->phy.speed);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

        /* on 40x erratum forces us to NOT use integrated flow control,
         * let's hope it works on 44x ;)
         */
        if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
            dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        mr1 |= EMAC_MR1_APP;
        }

        /* Add base settings & fifo sizes & program MR1 */
        mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
        out_be32(&p->mr1, mr1);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register; program the hash table before enabling
         * hash-based multicast filtering.
         */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
                               tx_size / 2 / dev->fifo_entry_size);
        else
                r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
                              tx_size / 2 / dev->fifo_entry_size);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

        /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
           there should be still enough space in FIFO to allow the our link
           partner time to process this frame and also time to send PAUSE
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------
           3187 bytes

           I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
         */
        r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
                           rx_size / 4 / dev->fifo_entry_size);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                EMAC_ISR_IRE | EMAC_ISR_TE;
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
            r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
                                                  EMAC4_ISR_RXOE | */;
        out_be32(&p->iser,  r);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode)) {
                if (dev->phy.gpcs_address != 0xffffffff)
                        emac_mii_reset_gpcs(&dev->phy);
                else
                        emac_mii_reset_phy(&dev->phy);
        }

        return 0;
}
734
735 static void emac_reinitialize(struct emac_instance *dev)
736 {
737         DBG(dev, "reinitialize" NL);
738
739         emac_netif_stop(dev);
740         if (!emac_configure(dev)) {
741                 emac_tx_enable(dev);
742                 emac_rx_enable(dev);
743         }
744         emac_netif_start(dev);
745 }
746
/* Fully reset the TX path: stop the channel, free every queued skb,
 * reset ring indices, reprogram the EMAC and restart both directions.
 * Called from the reset worker when TX has wedged.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
        DBG(dev, "full_tx_reset" NL);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
        emac_clean_tx_ring(dev);
        /* Ring is empty now: reset producer/consumer/ack indices */
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
}
762
763 static void emac_reset_work(struct work_struct *work)
764 {
765         struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
766
767         DBG(dev, "reset_work" NL);
768
769         mutex_lock(&dev->link_lock);
770         if (dev->opened) {
771                 emac_netif_stop(dev);
772                 emac_full_tx_reset(dev);
773                 emac_netif_start(dev);
774         }
775         mutex_unlock(&dev->link_lock);
776 }
777
778 static void emac_tx_timeout(struct net_device *ndev)
779 {
780         struct emac_instance *dev = netdev_priv(ndev);
781
782         DBG(dev, "tx_timeout" NL);
783
784         schedule_work(&dev->reset_work);
785 }
786
787
788 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
789 {
790         int done = !!(stacr & EMAC_STACR_OC);
791
792         if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
793                 done = !done;
794
795         return done;
796 };
797
/* MDIO read of register @reg on PHY @id through the EMAC's STA
 * controller.
 *
 * Serialized by dev->mdio_lock and claims the MDIO lines from the
 * shared ZMII/RGMII bridge for the duration.  Returns the 16-bit
 * register value, or -ETIMEDOUT if the controller never goes idle /
 * never completes, or -EREMOTEIO on a PHY error (STACR_PHYE).
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
        struct emac_regs __iomem *p = dev->emacp;
        u32 r = 0;
        int n, err = -ETIMEDOUT;

        mutex_lock(&dev->mdio_lock);

        DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

        /* Enable proper MDIO port */
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

        /* Wait for management interface to become idle */
        n = 20;
        while (!emac_phy_done(dev, in_be32(&p->stacr))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait idle\n");
                        goto bail;
                }
        }

        /* Issue read command: base clock bits, READ opcode, then the
         * register and PHY addresses packed into STACR.
         */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4))
                r = EMAC4_STACR_BASE(dev->opb_bus_freq);
        else
                r = EMAC_STACR_BASE(dev->opb_bus_freq);
        if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
                r |= EMAC_STACR_OC;
        if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
                r |= EMACX_STACR_STAC_READ;
        else
                r |= EMAC_STACR_STAC_READ;
        r |= (reg & EMAC_STACR_PRA_MASK)
                | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
        out_be32(&p->stacr, r);

        /* Wait for read to complete */
        n = 200;
        while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
                udelay(1);
                if (!--n) {
                        DBG2(dev, " -> timeout wait complete\n");
                        goto bail;
                }
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
                err = -EREMOTEIO;
                goto bail;
        }

        /* Extract the 16-bit data field from the final STACR value */
        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

        DBG2(dev, "mdio_read -> %04x" NL, r);
        err = 0;
 bail:
        if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
                rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
        mutex_unlock(&dev->mdio_lock);

        return err == 0 ? r : err;
}
868
/*
 * Write a 16-bit value to a PHY register over the EMAC management
 * (MDIO) interface.  Serialized by dev->mdio_lock.
 *
 * NOTE(review): err is tracked like in __emac_mdio_read() but the
 * function returns void, so write timeouts are silent apart from the
 * DBG2 traces.
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command: STACR layout differs between EMAC4 and
	 * older cores, and between new/old STAC field encodings. */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release the MDIO port in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
929
930 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
931 {
932         struct emac_instance *dev = netdev_priv(ndev);
933         int res;
934
935         res = __emac_mdio_read((dev->mdio_instance &&
936                                 dev->phy.gpcs_address != id) ?
937                                 dev->mdio_instance : dev,
938                                (u8) id, (u8) reg);
939         return res;
940 }
941
942 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
943 {
944         struct emac_instance *dev = netdev_priv(ndev);
945
946         __emac_mdio_write((dev->mdio_instance &&
947                            dev->phy.gpcs_address != id) ?
948                            dev->mdio_instance : dev,
949                           (u8) id, (u8) reg, (u16) val);
950 }
951
952 /* Tx lock BH */
/*
 * Program the RX mode register from the net_device flags (promiscuous
 * and multicast settings).  Called with TX lock held, BHs disabled.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	/* Reload the hash filter only when multicast acceptance is on */
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
984
985 /* Tx lock BH */
986 static void emac_set_multicast_list(struct net_device *ndev)
987 {
988         struct emac_instance *dev = netdev_priv(ndev);
989
990         DBG(dev, "multicast" NL);
991
992         BUG_ON(!netif_running(dev->ndev));
993
994         if (dev->no_mcast) {
995                 dev->mcast_pending = 1;
996                 return;
997         }
998         __emac_set_multicast_list(dev);
999 }
1000
/*
 * emac_resize_rx_ring - rebuild the RX ring for a new MTU.
 *
 * Stops the netif and RX path, drops any in-flight packets, marks every
 * BD empty, and — only when the new MTU needs bigger skbs — replaces the
 * ring buffers.  Crossing the ETH_DATA_LEN boundary additionally requires
 * flipping the MR1 "jumbo" bit via a full TX reset.  Returns 0 or
 * -ENOMEM.  On allocation failure the ring is restarted anyway: slots
 * already reallocated keep their new (larger) skbs, the rest keep the
 * old ones, and all BDs were already marked empty by the first pass.
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;
	int mr1_jumbo_bit_change = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Abandon any partially reassembled scatter/gather packet */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* Same headroom/offset scheme as emac_alloc_rx_skb() */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
				(dev->ndev->mtu > ETH_DATA_LEN);
	} else {
		mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
				(dev->ndev->mtu > ETH_DATA_LEN);
	}

	if (mr1_jumbo_bit_change) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1083
1084 /* Process ctx, rtnl_lock semaphore */
1085 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1086 {
1087         struct emac_instance *dev = netdev_priv(ndev);
1088         int ret = 0;
1089
1090         if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1091                 return -EINVAL;
1092
1093         DBG(dev, "change_mtu(%d)" NL, new_mtu);
1094
1095         if (netif_running(ndev)) {
1096                 /* Check if we really need to reinitialize RX ring */
1097                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1098                         ret = emac_resize_rx_ring(dev, new_mtu);
1099         }
1100
1101         if (!ret) {
1102                 ndev->mtu = new_mtu;
1103                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1104                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1105         }
1106
1107         return ret;
1108 }
1109
1110 static void emac_clean_tx_ring(struct emac_instance *dev)
1111 {
1112         int i;
1113
1114         for (i = 0; i < NUM_TX_BUFF; ++i) {
1115                 if (dev->tx_skb[i]) {
1116                         dev_kfree_skb(dev->tx_skb[i]);
1117                         dev->tx_skb[i] = NULL;
1118                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1119                                 ++dev->estats.tx_dropped;
1120                 }
1121                 dev->tx_desc[i].ctrl = 0;
1122                 dev->tx_desc[i].data_ptr = 0;
1123         }
1124 }
1125
1126 static void emac_clean_rx_ring(struct emac_instance *dev)
1127 {
1128         int i;
1129
1130         for (i = 0; i < NUM_RX_BUFF; ++i)
1131                 if (dev->rx_skb[i]) {
1132                         dev->rx_desc[i].ctrl = 0;
1133                         dev_kfree_skb(dev->rx_skb[i]);
1134                         dev->rx_skb[i] = NULL;
1135                         dev->rx_desc[i].data_ptr = 0;
1136                 }
1137
1138         if (dev->rx_sg_skb) {
1139                 dev_kfree_skb(dev->rx_sg_skb);
1140                 dev->rx_sg_skb = NULL;
1141         }
1142 }
1143
/*
 * Allocate, DMA-map and publish a fresh RX skb for ring slot @slot.
 * Returns 0 on success, -ENOMEM on allocation failure (slot untouched).
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* The extra 2 bytes skew the buffer so the packet data starts 2
	 * bytes into the mapping — presumably a NET_IP_ALIGN-style offset
	 * to align the IP header; confirm against the RX path. */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* data_ptr/data_len must be visible before ctrl hands the BD to
	 * the MAL (EMPTY cleared only after these writes land). */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1164
1165 static void emac_print_link_status(struct emac_instance *dev)
1166 {
1167         if (netif_carrier_ok(dev->ndev))
1168                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1169                        dev->ndev->name, dev->phy.speed,
1170                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1171                        dev->phy.pause ? ", pause enabled" :
1172                        dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1173         else
1174                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1175 }
1176
1177 /* Process ctx, rtnl_lock semaphore */
/*
 * emac_open - ndo_open: bring the interface up.
 *
 * Requests the error IRQ, fills the RX ring, starts PHY link polling
 * (or forces carrier on when there is no PHY address), then configures
 * the EMAC, attaches to MAL polling and enables both channels.
 * Returns 0 or -ENOMEM / IRQ request error.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	/* opened is checked (under link_lock) by the link timer and the
	 * reset work before touching the hardware */
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		/* No PHY to poll: assume the link is always up */
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	/* Free whatever part of the RX ring was allocated, and the IRQ */
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1250
1251 /* BHs disabled */
#if 0
/*
 * Compiled out (dead code, kept for reference): decode the current MR1
 * speed/duplex/pause configuration and report whether it differs from
 * the last values read from the PHY.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1283
/*
 * Periodic PHY link poll (delayed work, self-rearming).
 *
 * On a link-up transition, reads the new link parameters and performs a
 * full TX reset so the MAC is reprogrammed for the negotiated mode; on
 * link-down, disables TX and reinitializes the EMAC.  Poll interval
 * depends on the current carrier state.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Interface was closed while this work was queued: do not rearm */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1324
/*
 * Force an early link re-evaluation: mark carrier down and, if PHY
 * polling is active, cancel the pending poll and reschedule it with
 * the (short) link-down interval.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		/* Re-check after the sync cancel: emac_close() clears
		 * link_polling before cancelling, so we must not rearm
		 * the work on an interface that is going down. */
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1335
1336 /* Process ctx, rtnl_lock semaphore */
/*
 * emac_close - ndo_close: take the interface down.
 *
 * Stops PHY polling first (link_polling cleared before the sync cancel
 * so emac_force_link_update() cannot rearm the work), marks the device
 * closed under link_lock, then disables EMAC and MAL channels, frees
 * both rings and releases the IRQ.  Process context, under rtnl_lock.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	/* Tells the link timer and reset work to leave the HW alone */
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1367
1368 static inline u16 emac_tx_csum(struct emac_instance *dev,
1369                                struct sk_buff *skb)
1370 {
1371         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1372                 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1373                 ++dev->stats.tx_packets_csum;
1374                 return EMAC_TX_CTRL_TAH_CSUM;
1375         }
1376         return 0;
1377 }
1378
1379 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1380 {
1381         struct emac_regs __iomem *p = dev->emacp;
1382         struct net_device *ndev = dev->ndev;
1383
1384         /* Send the packet out. If the if makes a significant perf
1385          * difference, then we can store the TMR0 value in "dev"
1386          * instead
1387          */
1388         if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1389                 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1390         else
1391                 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1392
1393         if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1394                 netif_stop_queue(ndev);
1395                 DBG2(dev, "stopped TX queue" NL);
1396         }
1397
1398         ndev->trans_start = jiffies;
1399         ++dev->stats.tx_packets;
1400         dev->stats.tx_bytes += len;
1401
1402         return NETDEV_TX_OK;
1403 }
1404
1405 /* Tx lock BH */
/*
 * emac_start_xmit - single-descriptor transmit path (non-TAH EMACs,
 * and the fast path for small linear skbs on TAH EMACs).
 * Maps the whole skb into one TX BD.  Called with TX lock held, BHs
 * disabled.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* data_ptr/data_len must be visible before READY is set in ctrl,
	 * or the MAL could pick up a half-written descriptor */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1433
/*
 * emac_xmit_split - fill consecutive TX slots with MAL_MAX_TX_SIZE-sized
 * chunks of one already DMA-mapped region.
 *
 * @slot:      last slot already filled; filling starts at the next one
 * @pd:        DMA address of the region
 * @len:       region length (must be > 0)
 * @last:      this region ends the packet (sets MAL_TX_CTRL_LAST on the
 *             final chunk)
 * @base_ctrl: ctrl bits common to every chunk (includes READY)
 *
 * Returns the index of the last slot written.  The caller is
 * responsible for ensuring enough free slots exist.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		/* One MAL descriptor carries at most MAL_MAX_TX_SIZE bytes */
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* Intermediate slots own no skb; the caller attaches the
		 * skb to the final slot of the whole packet */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1462
1463 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/*
 * emac_start_xmit_sg - scatter/gather transmit path for TAH-equipped
 * EMACs.  Falls back to emac_start_xmit() for small linear skbs.
 * Called with TX lock held, BHs disabled.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	/* len now covers only the linear (head) part of the skb */
	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		/* The estimate above can be beaten by chunking overhead;
		 * bail out and undo if the ring would overflow */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
				      DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out: the first descriptor is made READY last
	 * (after the wmb) so the MAL cannot start on a half-built chain */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1546
1547 /* Tx lock BHs */
1548 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1549 {
1550         struct emac_error_stats *st = &dev->estats;
1551
1552         DBG(dev, "BD TX error %04x" NL, ctrl);
1553
1554         ++st->tx_bd_errors;
1555         if (ctrl & EMAC_TX_ST_BFCS)
1556                 ++st->tx_bd_bad_fcs;
1557         if (ctrl & EMAC_TX_ST_LCS)
1558                 ++st->tx_bd_carrier_loss;
1559         if (ctrl & EMAC_TX_ST_ED)
1560                 ++st->tx_bd_excessive_deferral;
1561         if (ctrl & EMAC_TX_ST_EC)
1562                 ++st->tx_bd_excessive_collisions;
1563         if (ctrl & EMAC_TX_ST_LC)
1564                 ++st->tx_bd_late_collision;
1565         if (ctrl & EMAC_TX_ST_MC)
1566                 ++st->tx_bd_multple_collisions;
1567         if (ctrl & EMAC_TX_ST_SC)
1568                 ++st->tx_bd_single_collision;
1569         if (ctrl & EMAC_TX_ST_UR)
1570                 ++st->tx_bd_underrun;
1571         if (ctrl & EMAC_TX_ST_SQE)
1572                 ++st->tx_bd_sqe;
1573 }
1574
/*
 * Reclaim completed TX descriptors (MAL poll callback).
 *
 * Walks the ring from ack_slot, freeing skbs and accounting BD-level
 * errors for every descriptor the hardware has released (READY clear),
 * then wakes the netif queue once occupancy drops below
 * EMAC_TX_WAKEUP_THRESH.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs report a different set of error bits */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		/* Stop at the first descriptor still owned by the MAL */
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split packets attach the skb only to the last
			 * slot, so intermediate slots hold NULL here */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1620
/*
 * Hand an RX skb back to the hardware without reallocating it: re-sync
 * the buffer for the device and mark the descriptor EMPTY again.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	/* Re-map to sync the consumed bytes back for DMA; buffers are
	 * never unmapped by design (see the note at the top of this file) */
	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* data_len must be visible before ctrl hands the BD to the MAL */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1637
1638 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1639 {
1640         struct emac_error_stats *st = &dev->estats;
1641
1642         DBG(dev, "BD RX error %04x" NL, ctrl);
1643
1644         ++st->rx_bd_errors;
1645         if (ctrl & EMAC_RX_ST_OE)
1646                 ++st->rx_bd_overrun;
1647         if (ctrl & EMAC_RX_ST_BP)
1648                 ++st->rx_bd_bad_packet;
1649         if (ctrl & EMAC_RX_ST_RP)
1650                 ++st->rx_bd_runt_packet;
1651         if (ctrl & EMAC_RX_ST_SE)
1652                 ++st->rx_bd_short_event;
1653         if (ctrl & EMAC_RX_ST_AE)
1654                 ++st->rx_bd_alignment_error;
1655         if (ctrl & EMAC_RX_ST_BFCS)
1656                 ++st->rx_bd_bad_fcs;
1657         if (ctrl & EMAC_RX_ST_PTL)
1658                 ++st->rx_bd_packet_too_long;
1659         if (ctrl & EMAC_RX_ST_ORE)
1660                 ++st->rx_bd_out_of_range;
1661         if (ctrl & EMAC_RX_ST_IRE)
1662                 ++st->rx_bd_in_range;
1663 }
1664
/*
 * Mark a received skb as checksum-verified when the TAH is present and
 * the descriptor status word is clean (all zero — presumably no TAH
 * checksum-error bits set; confirm against the TAH status layout).
 */
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
{
#ifdef CONFIG_IBM_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}
1675
/* Append the data of ring slot @slot to the in-progress scatter/gather
 * packet (dev->rx_sg_skb).  Returns 0 on success; returns -1 and drops
 * the partial packet if it would exceed the RX buffer size, or if no
 * s/g packet is in progress.  The ring buffer at @slot is recycled in
 * every case.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		/* +2 accounts for the alignment reserve at the buffer start */
		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			/* pass len so the buffer is re-synced for the device */
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1697
/* NAPI poll context.
 * Harvest up to @budget completed RX descriptors starting at
 * dev->rx_slot.  Handles single-descriptor packets inline and
 * multi-descriptor (scatter/gather) chains via the `sg` path below.
 * Returns the number of descriptors processed; also restarts the RX
 * channel if it was stopped by a descriptor error (emac_rxde).
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* Descriptor still owned by hardware: nothing more to do */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Order the ctrl read above against the data_len read below */
		mb();
		len = dev->rx_desc[slot].data_len;

		/* Part of a multi-descriptor packet: handled separately */
		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		/* Drop errored packets; a TAH checksum failure alone is not
		 * fatal - the packet is delivered without csum offload */
		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		/* Frames shorter than an Ethernet header are dropped */
		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Small packet: copy into a fresh small skb and recycle the
		 * large ring buffer.  Otherwise hand the ring skb to the
		 * stack and allocate a replacement for the ring.  The -2/+2
		 * skew keeps the IP header word-aligned. */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			/* First chunk of a chain: the ring skb becomes the
			 * accumulator, a fresh buffer replaces it in the ring.
			 * On allocation failure the chunk is dropped and no
			 * s/g packet is started. */
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {
			/* Chain complete: re-run the error checks on the last
			 * descriptor's status, then push the whole packet */
			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* RX channel was stopped (descriptor error): if there is budget
	 * left, either keep harvesting freshly completed descriptors or
	 * restart the channel from slot 0. */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		/* A partial s/g packet cannot survive the restart: drop it */
		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1827
1828 /* NAPI poll context */
1829 static int emac_peek_rx(void *param)
1830 {
1831         struct emac_instance *dev = param;
1832
1833         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1834 }
1835
1836 /* NAPI poll context */
1837 static int emac_peek_rx_sg(void *param)
1838 {
1839         struct emac_instance *dev = param;
1840
1841         int slot = dev->rx_slot;
1842         while (1) {
1843                 u16 ctrl = dev->rx_desc[slot].ctrl;
1844                 if (ctrl & MAL_RX_CTRL_EMPTY)
1845                         return 0;
1846                 else if (ctrl & MAL_RX_CTRL_LAST)
1847                         return 1;
1848
1849                 slot = (slot + 1) % NUM_RX_BUFF;
1850
1851                 /* I'm just being paranoid here :) */
1852                 if (unlikely(slot == dev->rx_slot))
1853                         return 0;
1854         }
1855 }
1856
/* Hard IRQ.
 * MAL reported an RX descriptor error: count it and disable RX
 * asynchronously.  The poll loop restarts the channel once it sees the
 * stopped state (see the MAL_COMMAC_RX_STOPPED handling in
 * emac_poll_rx).
 */
static void emac_rxde(void *param)
{
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}
1865
/* Hard IRQ.
 * EMAC error/status interrupt: read the ISR, write the value back
 * (which appears to acknowledge the pending bits - confirm against the
 * EMAC manual), and translate each bit into its error counter.  Packet
 * completion is signalled via the MAL, not here.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Sample and ack all pending status bits in one shot */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	/* EMAC4-only conditions */
	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	/* Common conditions */
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1916
/* ndo_get_stats hook.
 * Fold the driver's detailed 64-bit statistics into the "legacy"
 * struct net_device_stats.  The snapshot is taken under dev->lock so
 * it is internally consistent with concurrent updates.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	/* All the drop reasons collapse into the single rx_dropped count */
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	/* Per-descriptor and per-MAC length errors are merged */
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1969
/* MAL "commac" callbacks for the normal, single-descriptor RX case */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

/* Same callbacks, but with the scatter/gather aware RX peek */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1983
/* Ethtool support */

/* Report current link settings.  Static capabilities are read without
 * locking; the volatile link parameters are sampled under link_lock so
 * they are consistent with one another.
 */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* A negative address marks the PHY-less/internal configuration */
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);

	return 0;
}
2005
2006 static int emac_ethtool_set_settings(struct net_device *ndev,
2007                                      struct ethtool_cmd *cmd)
2008 {
2009         struct emac_instance *dev = netdev_priv(ndev);
2010         u32 f = dev->phy.features;
2011
2012         DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2013             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
2014
2015         /* Basic sanity checks */
2016         if (dev->phy.address < 0)
2017                 return -EOPNOTSUPP;
2018         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
2019                 return -EINVAL;
2020         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
2021                 return -EINVAL;
2022         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
2023                 return -EINVAL;
2024
2025         if (cmd->autoneg == AUTONEG_DISABLE) {
2026                 switch (cmd->speed) {
2027                 case SPEED_10:
2028                         if (cmd->duplex == DUPLEX_HALF &&
2029                             !(f & SUPPORTED_10baseT_Half))
2030                                 return -EINVAL;
2031                         if (cmd->duplex == DUPLEX_FULL &&
2032                             !(f & SUPPORTED_10baseT_Full))
2033                                 return -EINVAL;
2034                         break;
2035                 case SPEED_100:
2036                         if (cmd->duplex == DUPLEX_HALF &&
2037                             !(f & SUPPORTED_100baseT_Half))
2038                                 return -EINVAL;
2039                         if (cmd->duplex == DUPLEX_FULL &&
2040                             !(f & SUPPORTED_100baseT_Full))
2041                                 return -EINVAL;
2042                         break;
2043                 case SPEED_1000:
2044                         if (cmd->duplex == DUPLEX_HALF &&
2045                             !(f & SUPPORTED_1000baseT_Half))
2046                                 return -EINVAL;
2047                         if (cmd->duplex == DUPLEX_FULL &&
2048                             !(f & SUPPORTED_1000baseT_Full))
2049                                 return -EINVAL;
2050                         break;
2051                 default:
2052                         return -EINVAL;
2053                 }
2054
2055                 mutex_lock(&dev->link_lock);
2056                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2057                                                 cmd->duplex);
2058                 mutex_unlock(&dev->link_lock);
2059
2060         } else {
2061                 if (!(f & SUPPORTED_Autoneg))
2062                         return -EINVAL;
2063
2064                 mutex_lock(&dev->link_lock);
2065                 dev->phy.def->ops->setup_aneg(&dev->phy,
2066                                               (cmd->advertising & f) |
2067                                               (dev->phy.advertising &
2068                                                (ADVERTISED_Pause |
2069                                                 ADVERTISED_Asym_Pause)));
2070                 mutex_unlock(&dev->link_lock);
2071         }
2072         emac_force_link_update(dev);
2073
2074         return 0;
2075 }
2076
2077 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2078                                        struct ethtool_ringparam *rp)
2079 {
2080         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2081         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2082 }
2083
2084 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2085                                         struct ethtool_pauseparam *pp)
2086 {
2087         struct emac_instance *dev = netdev_priv(ndev);
2088
2089         mutex_lock(&dev->link_lock);
2090         if ((dev->phy.features & SUPPORTED_Autoneg) &&
2091             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2092                 pp->autoneg = 1;
2093
2094         if (dev->phy.duplex == DUPLEX_FULL) {
2095                 if (dev->phy.pause)
2096                         pp->rx_pause = pp->tx_pause = 1;
2097                 else if (dev->phy.asym_pause)
2098                         pp->tx_pause = 1;
2099         }
2100         mutex_unlock(&dev->link_lock);
2101 }
2102
2103 static int emac_get_regs_len(struct emac_instance *dev)
2104 {
2105                 return sizeof(struct emac_ethtool_regs_subhdr) +
2106                         sizeof(struct emac_regs);
2107 }
2108
2109 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2110 {
2111         struct emac_instance *dev = netdev_priv(ndev);
2112         int size;
2113
2114         size = sizeof(struct emac_ethtool_regs_hdr) +
2115                 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2116         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2117                 size += zmii_get_regs_len(dev->zmii_dev);
2118         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2119                 size += rgmii_get_regs_len(dev->rgmii_dev);
2120         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2121                 size += tah_get_regs_len(dev->tah_dev);
2122
2123         return size;
2124 }
2125
2126 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2127 {
2128         struct emac_ethtool_regs_subhdr *hdr = buf;
2129
2130         hdr->index = dev->cell_index;
2131         if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2132                 hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2133         } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2134                 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2135         } else {
2136                 hdr->version = EMAC_ETHTOOL_REGS_VER;
2137         }
2138         memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2139         return (void *)(hdr + 1) + sizeof(struct emac_regs);
2140 }
2141
/* Fill the ethtool register dump buffer: a header carrying a bitmask of
 * which optional components follow, then the MAL, EMAC and any
 * ZMII/RGMII/TAH register blocks, each dumper advancing the buffer
 * pointer past what it wrote.
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2166
2167 static int emac_ethtool_nway_reset(struct net_device *ndev)
2168 {
2169         struct emac_instance *dev = netdev_priv(ndev);
2170         int res = 0;
2171
2172         DBG(dev, "nway_reset" NL);
2173
2174         if (dev->phy.address < 0)
2175                 return -EOPNOTSUPP;
2176
2177         mutex_lock(&dev->link_lock);
2178         if (!dev->phy.autoneg) {
2179                 res = -EINVAL;
2180                 goto out;
2181         }
2182
2183         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2184  out:
2185         mutex_unlock(&dev->link_lock);
2186         emac_force_link_update(dev);
2187         return res;
2188 }
2189
2190 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2191 {
2192         if (stringset == ETH_SS_STATS)
2193                 return EMAC_ETHTOOL_STATS_COUNT;
2194         else
2195                 return -EINVAL;
2196 }
2197
2198 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2199                                      u8 * buf)
2200 {
2201         if (stringset == ETH_SS_STATS)
2202                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2203 }
2204
2205 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2206                                            struct ethtool_stats *estats,
2207                                            u64 * tmp_stats)
2208 {
2209         struct emac_instance *dev = netdev_priv(ndev);
2210
2211         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2212         tmp_stats += sizeof(dev->stats) / sizeof(u64);
2213         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2214 }
2215
/* Report driver identification: fixed name/version plus a synthetic
 * bus string built from the cell index and the device-tree node path.
 */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct emac_instance *dev = netdev_priv(ndev);

	strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
		 dev->cell_index, dev->ofdev->dev.of_node->full_name);
}
2226
/* Ethtool entry points for the EMAC netdev */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
};
2246
/* Legacy MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Note that register accesses always target our own PHY address:
 * data->phy_id from userspace is ignored for SIOCGMIIREG/SIOCSMIIREG.
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct mii_ioctl_data *data = if_mii(rq);

	DBG(dev, "ioctl %08x" NL, cmd);

	/* No PHY, nothing to talk to */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = dev->phy.address;
		/* Fall through: GMIIPHY also returns the register value */
	case SIOCGMIIREG:
		data->val_out = emac_mdio_read(ndev, dev->phy.address,
					       data->reg_num);
		return 0;

	case SIOCSMIIREG:
		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2274
/* One probe-time dependency of an EMAC instance (MAL, ZMII, RGMII,
 * TAH, MDIO or the previous EMAC).  Resolved incrementally by
 * emac_check_deps(): each field is filled in as the corresponding
 * lookup succeeds and cached across polling rounds.
 */
struct emac_depentry {
	u32                     phandle;	/* DT phandle; 0 = no dependency */
	struct device_node      *node;		/* resolved device-tree node */
	struct platform_device  *ofdev;		/* matching platform device */
	void                    *drvdata;	/* driver data once probed */
};
2281
/* Fixed slots in the dependency array used by emac_check_deps() and
 * emac_wait_deps() */
#define EMAC_DEP_MAL_IDX        0
#define EMAC_DEP_ZMII_IDX       1
#define EMAC_DEP_RGMII_IDX      2
#define EMAC_DEP_TAH_IDX        3
#define EMAC_DEP_MDIO_IDX       4
#define EMAC_DEP_PREV_IDX       5
#define EMAC_DEP_COUNT          6
2289
/* One pass over the dependency table: for every entry, try to advance
 * the resolution chain phandle -> DT node -> platform device -> driver
 * data.  Each step is cached in the entry, so repeated polling only
 * performs the remaining lookups.  An entry with phandle 0 counts as
 * satisfied.  Returns non-zero once every dependency is satisfied.
 */
static int emac_check_deps(struct emac_instance *dev,
			   struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				/* previous EMAC is gone; dependency dropped */
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
		/* drvdata present means the dependency's driver has probed */
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}
2328
/* Drop the device references taken on our dependencies by
 * emac_wait_deps().  Optional dependencies may be NULL here;
 * of_dev_put() is expected to tolerate that (verify if in doubt).
 */
static void emac_put_deps(struct emac_instance *dev)
{
	of_dev_put(dev->mal_dev);
	of_dev_put(dev->zmii_dev);
	of_dev_put(dev->rgmii_dev);
	of_dev_put(dev->mdio_dev);
	of_dev_put(dev->tah_dev);
}
2337
2338 static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2339                               void *data)
2340 {
2341         /* We are only intereted in device addition */
2342         if (action == BUS_NOTIFY_BOUND_DRIVER)
2343                 wake_up_all(&emac_probe_wait);
2344         return 0;
2345 }
2346
/* Registered on the platform bus for the duration of emac_wait_deps() */
static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
};
2350
/* Wait (with timeout) until all devices this EMAC depends on - the MAL,
 * optional ZMII/RGMII/TAH/MDIO bridges, and the previous EMAC in the
 * boot list - have been probed.  On success, stores the platform
 * devices in @dev with a reference held on each; on failure, drops all
 * acquired references.  Returns 0 on success or -ENODEV on timeout.
 */
static int emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* The "previous EMAC" slot uses a dummy phandle; emac_check_deps()
	 * resolves it via dev->blist instead */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	/* Re-check whenever any platform driver binds, until timeout */
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		of_node_put(deps[i].node);
		/* on failure nobody keeps the device refs: drop them */
		if (err)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* the previous-EMAC device is only needed for ordering, not kept */
	of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2388
2389 static int emac_read_uint_prop(struct device_node *np, const char *name,
2390                                u32 *val, int fatal)
2391 {
2392         int len;
2393         const u32 *prop = of_get_property(np, name, &len);
2394         if (prop == NULL || len < sizeof(u32)) {
2395                 if (fatal)
2396                         printk(KERN_ERR "%s: missing %s property\n",
2397                                np->full_name, name);
2398                 return -ENODEV;
2399         }
2400         *val = *prop;
2401         return 0;
2402 }
2403
2404 static int emac_init_phy(struct emac_instance *dev)
2405 {
2406         struct device_node *np = dev->ofdev->dev.of_node;
2407         struct net_device *ndev = dev->ndev;
2408         u32 phy_map, adv;
2409         int i;
2410
2411         dev->phy.dev = ndev;
2412         dev->phy.mode = dev->phy_mode;
2413
2414         /* PHY-less configuration.
2415          * XXX I probably should move these settings to the dev tree
2416          */
2417         if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2418                 emac_reset(dev);
2419
2420                 /* PHY-less configuration.
2421                  * XXX I probably should move these settings to the dev tree
2422                  */
2423                 dev->phy.address = -1;
2424                 dev->phy.features = SUPPORTED_MII;
2425                 if (emac_phy_supports_gige(dev->phy_mode))
2426                         dev->phy.features |= SUPPORTED_1000baseT_Full;
2427                 else
2428                         dev->phy.features |= SUPPORTED_100baseT_Full;
2429                 dev->phy.pause = 1;
2430
2431                 return 0;
2432         }
2433
2434         mutex_lock(&emac_phy_map_lock);
2435         phy_map = dev->phy_map | busy_phy_map;
2436
2437         DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2438
2439         dev->phy.mdio_read = emac_mdio_read;
2440         dev->phy.mdio_write = emac_mdio_write;
2441
2442         /* Enable internal clock source */
2443 #ifdef CONFIG_PPC_DCR_NATIVE
2444         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2445                 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2446 #endif
2447         /* PHY clock workaround */
2448         emac_rx_clk_tx(dev);
2449
2450         /* Enable internal clock source on 440GX*/
2451 #ifdef CONFIG_PPC_DCR_NATIVE
2452         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2453                 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2454 #endif
2455         /* Configure EMAC with defaults so we can at least use MDIO
2456          * This is needed mostly for 440GX
2457          */
2458         if (emac_phy_gpcs(dev->phy.mode)) {
2459                 /* XXX
2460                  * Make GPCS PHY address equal to EMAC index.
2461                  * We probably should take into account busy_phy_map
2462                  * and/or phy_map here.
2463                  *
2464                  * Note that the busy_phy_map is currently global
2465                  * while it should probably be per-ASIC...
2466                  */
2467                 dev->phy.gpcs_address = dev->gpcs_address;
2468                 if (dev->phy.gpcs_address == 0xffffffff)
2469                         dev->phy.address = dev->cell_index;
2470         }
2471
2472         emac_configure(dev);
2473
2474         if (dev->phy_address != 0xffffffff)
2475                 phy_map = ~(1 << dev->phy_address);
2476
2477         for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2478                 if (!(phy_map & 1)) {
2479                         int r;
2480                         busy_phy_map |= 1 << i;
2481
2482                         /* Quick check if there is a PHY at the address */
2483                         r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2484                         if (r == 0xffff || r < 0)
2485                                 continue;
2486                         if (!emac_mii_phy_probe(&dev->phy, i))
2487                                 break;
2488                 }
2489
2490         /* Enable external clock source */
2491 #ifdef CONFIG_PPC_DCR_NATIVE
2492         if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2493                 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2494 #endif
2495         mutex_unlock(&emac_phy_map_lock);
2496         if (i == 0x20) {
2497                 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2498                 return -ENXIO;
2499         }
2500
2501         /* Init PHY */
2502         if (dev->phy.def->ops->init)
2503                 dev->phy.def->ops->init(&dev->phy);
2504
2505         /* Disable any PHY features not supported by the platform */
2506         dev->phy.def->features &= ~dev->phy_feat_exc;
2507         dev->phy.features &= ~dev->phy_feat_exc;
2508
2509         /* Setup initial link parameters */
2510         if (dev->phy.features & SUPPORTED_Autoneg) {
2511                 adv = dev->phy.features;
2512                 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2513                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2514                 /* Restart autonegotiation */
2515                 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2516         } else {
2517                 u32 f = dev->phy.def->features;
2518                 int speed = SPEED_10, fd = DUPLEX_HALF;
2519
2520                 /* Select highest supported speed/duplex */
2521                 if (f & SUPPORTED_1000baseT_Full) {
2522                         speed = SPEED_1000;
2523                         fd = DUPLEX_FULL;
2524                 } else if (f & SUPPORTED_1000baseT_Half)
2525                         speed = SPEED_1000;
2526                 else if (f & SUPPORTED_100baseT_Full) {
2527                         speed = SPEED_100;
2528                         fd = DUPLEX_FULL;
2529                 } else if (f & SUPPORTED_100baseT_Half)
2530                         speed = SPEED_100;
2531                 else if (f & SUPPORTED_10baseT_Full)
2532                         fd = DUPLEX_FULL;
2533
2534                 /* Force link parameters */
2535                 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2536         }
2537         return 0;
2538 }
2539
/* emac_init_config - populate an emac_instance from device-tree properties.
 *
 * Mandatory properties (probe fails with -ENXIO when absent): the MAL
 * device handle and its TX/RX channels, the cell index, and the parent
 * bus "clock-frequency".  Everything else is optional and falls back to
 * a default.  Also decodes the PHY mode, derives feature bits from the
 * "compatible" strings, copies the MAC address, and sets the IAHT/GAHT
 * filter geometry.
 *
 * Returns 0 on success, -ENXIO on a missing mandatory property or when
 * a companion device (TAH/ZMII/RGMII) is referenced but its support was
 * not compiled in.
 */
static int emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	const void *p;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	/* Gige FIFO sizes default to the 10/100 values when not given */
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	/* 0xffffffff means "not specified" for the PHY addressing props;
	 * emac_init_phy() switches to PHY-less mode when both are unset. */
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	/* OPB frequency comes from the parent bus node */
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = of_get_phy_mode(np);
	if (dev->phy_mode < 0)
		dev->phy_mode = PHY_MODE_NA;

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
		    of_device_is_compatible(np, "ibm,emac-405exr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
			dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
					  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
					  EMAC_FTR_460EX_PHY_CLK_FIX);
		}
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
			/* 405EZ has no flow-control support in hardware */
#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			printk(KERN_ERR "%s: Flow control not disabled!\n",
					np->full_name);
			return -ENXIO;
#endif
		}

	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, ETH_ALEN);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);

	return 0;
}
2698
/* Netdev ops for non-gige EMACs: plain (non-SG) xmit and the generic
 * ethernet MTU handler. */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
};
2711
/* Netdev ops for gige-capable EMACs: scatter/gather xmit and the
 * driver's own MTU handler (supports larger frames). */
static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_rx_mode	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit_sg,
	.ndo_change_mtu		= emac_change_mtu,
};
2724
2725 static int emac_probe(struct platform_device *ofdev)
2726 {
2727         struct net_device *ndev;
2728         struct emac_instance *dev;
2729         struct device_node *np = ofdev->dev.of_node;
2730         struct device_node **blist = NULL;
2731         int err, i;
2732
2733         /* Skip unused/unwired EMACS.  We leave the check for an unused
2734          * property here for now, but new flat device trees should set a
2735          * status property to "disabled" instead.
2736          */
2737         if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2738                 return -ENODEV;
2739
2740         /* Find ourselves in the bootlist if we are there */
2741         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2742                 if (emac_boot_list[i] == np)
2743                         blist = &emac_boot_list[i];
2744
2745         /* Allocate our net_device structure */
2746         err = -ENOMEM;
2747         ndev = alloc_etherdev(sizeof(struct emac_instance));
2748         if (!ndev)
2749                 goto err_gone;
2750
2751         dev = netdev_priv(ndev);
2752         dev->ndev = ndev;
2753         dev->ofdev = ofdev;
2754         dev->blist = blist;
2755         SET_NETDEV_DEV(ndev, &ofdev->dev);
2756
2757         /* Initialize some embedded data structures */
2758         mutex_init(&dev->mdio_lock);
2759         mutex_init(&dev->link_lock);
2760         spin_lock_init(&dev->lock);
2761         INIT_WORK(&dev->reset_work, emac_reset_work);
2762
2763         /* Init various config data based on device-tree */
2764         err = emac_init_config(dev);
2765         if (err != 0)
2766                 goto err_free;
2767
2768         /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2769         dev->emac_irq = irq_of_parse_and_map(np, 0);
2770         dev->wol_irq = irq_of_parse_and_map(np, 1);
2771         if (dev->emac_irq == NO_IRQ) {
2772                 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2773                 goto err_free;
2774         }
2775         ndev->irq = dev->emac_irq;
2776
2777         /* Map EMAC regs */
2778         if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2779                 printk(KERN_ERR "%s: Can't get registers address\n",
2780                        np->full_name);
2781                 goto err_irq_unmap;
2782         }
2783         // TODO : request_mem_region
2784         dev->emacp = ioremap(dev->rsrc_regs.start,
2785                              resource_size(&dev->rsrc_regs));
2786         if (dev->emacp == NULL) {
2787                 printk(KERN_ERR "%s: Can't map device registers!\n",
2788                        np->full_name);
2789                 err = -ENOMEM;
2790                 goto err_irq_unmap;
2791         }
2792
2793         /* Wait for dependent devices */
2794         err = emac_wait_deps(dev);
2795         if (err) {
2796                 printk(KERN_ERR
2797                        "%s: Timeout waiting for dependent devices\n",
2798                        np->full_name);
2799                 /*  display more info about what's missing ? */
2800                 goto err_reg_unmap;
2801         }
2802         dev->mal = platform_get_drvdata(dev->mal_dev);
2803         if (dev->mdio_dev != NULL)
2804                 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
2805
2806         /* Register with MAL */
2807         dev->commac.ops = &emac_commac_ops;
2808         dev->commac.dev = dev;
2809         dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2810         dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2811         err = mal_register_commac(dev->mal, &dev->commac);
2812         if (err) {
2813                 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2814                        np->full_name, dev->mal_dev->dev.of_node->full_name);
2815                 goto err_rel_deps;
2816         }
2817         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2818         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2819
2820         /* Get pointers to BD rings */
2821         dev->tx_desc =
2822             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2823         dev->rx_desc =
2824             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2825
2826         DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2827         DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2828
2829         /* Clean rings */
2830         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2831         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2832         memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2833         memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2834
2835         /* Attach to ZMII, if needed */
2836         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2837             (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2838                 goto err_unreg_commac;
2839
2840         /* Attach to RGMII, if needed */
2841         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2842             (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2843                 goto err_detach_zmii;
2844
2845         /* Attach to TAH, if needed */
2846         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2847             (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2848                 goto err_detach_rgmii;
2849
2850         /* Set some link defaults before we can find out real parameters */
2851         dev->phy.speed = SPEED_100;
2852         dev->phy.duplex = DUPLEX_FULL;
2853         dev->phy.autoneg = AUTONEG_DISABLE;
2854         dev->phy.pause = dev->phy.asym_pause = 0;
2855         dev->stop_timeout = STOP_TIMEOUT_100;
2856         INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2857
2858         /* Some SoCs like APM821xx does not support Half Duplex mode. */
2859         if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
2860                 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
2861                                      SUPPORTED_100baseT_Half |
2862                                      SUPPORTED_10baseT_Half);
2863         }
2864
2865         /* Find PHY if any */
2866         err = emac_init_phy(dev);
2867         if (err != 0)
2868                 goto err_detach_tah;
2869
2870         if (dev->tah_dev) {
2871                 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2872                 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2873         }
2874         ndev->watchdog_timeo = 5 * HZ;
2875         if (emac_phy_supports_gige(dev->phy_mode)) {
2876                 ndev->netdev_ops = &emac_gige_netdev_ops;
2877                 dev->commac.ops = &emac_commac_sg_ops;
2878         } else
2879                 ndev->netdev_ops = &emac_netdev_ops;
2880         ndev->ethtool_ops = &emac_ethtool_ops;
2881
2882         netif_carrier_off(ndev);
2883
2884         err = register_netdev(ndev);
2885         if (err) {
2886                 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2887                        np->full_name, err);
2888                 goto err_detach_tah;
2889         }
2890
2891         /* Set our drvdata last as we don't want them visible until we are
2892          * fully initialized
2893          */
2894         wmb();
2895         platform_set_drvdata(ofdev, dev);
2896
2897         /* There's a new kid in town ! Let's tell everybody */
2898         wake_up_all(&emac_probe_wait);
2899
2900
2901         printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2902                ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2903
2904         if (dev->phy_mode == PHY_MODE_SGMII)
2905                 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2906
2907         if (dev->phy.address >= 0)
2908                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2909                        dev->phy.def->name, dev->phy.address);
2910
2911         emac_dbg_register(dev);
2912
2913         /* Life is good */
2914         return 0;
2915
2916         /* I have a bad feeling about this ... */
2917
2918  err_detach_tah:
2919         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2920                 tah_detach(dev->tah_dev, dev->tah_port);
2921  err_detach_rgmii:
2922         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2923                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2924  err_detach_zmii:
2925         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2926                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2927  err_unreg_commac:
2928         mal_unregister_commac(dev->mal, &dev->commac);
2929  err_rel_deps:
2930         emac_put_deps(dev);
2931  err_reg_unmap:
2932         iounmap(dev->emacp);
2933  err_irq_unmap:
2934         if (dev->wol_irq != NO_IRQ)
2935                 irq_dispose_mapping(dev->wol_irq);
2936         if (dev->emac_irq != NO_IRQ)
2937                 irq_dispose_mapping(dev->emac_irq);
2938  err_free:
2939         free_netdev(ndev);
2940  err_gone:
2941         /* if we were on the bootlist, remove us as we won't show up and
2942          * wake up all waiters to notify them in case they were waiting
2943          * on us
2944          */
2945         if (blist) {
2946                 *blist = NULL;
2947                 wake_up_all(&emac_probe_wait);
2948         }
2949         return err;
2950 }
2951
2952 static int emac_remove(struct platform_device *ofdev)
2953 {
2954         struct emac_instance *dev = platform_get_drvdata(ofdev);
2955
2956         DBG(dev, "remove" NL);
2957
2958         unregister_netdev(dev->ndev);
2959
2960         cancel_work_sync(&dev->reset_work);
2961
2962         if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2963                 tah_detach(dev->tah_dev, dev->tah_port);
2964         if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2965                 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2966         if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2967                 zmii_detach(dev->zmii_dev, dev->zmii_port);
2968
2969         busy_phy_map &= ~(1 << dev->phy.address);
2970         DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
2971
2972         mal_unregister_commac(dev->mal, &dev->commac);
2973         emac_put_deps(dev);
2974
2975         emac_dbg_unregister(dev);
2976         iounmap(dev->emacp);
2977
2978         if (dev->wol_irq != NO_IRQ)
2979                 irq_dispose_mapping(dev->wol_irq);
2980         if (dev->emac_irq != NO_IRQ)
2981                 irq_dispose_mapping(dev->emac_irq);
2982
2983         free_netdev(dev->ndev);
2984
2985         return 0;
2986 }
2987
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: all three EMAC generations bind to this
 * driver; the exact variant is decoded later in emac_init_config(). */
static const struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
3006
/* Platform driver glue; registered from emac_init(). */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
3015
3016 static void __init emac_make_bootlist(void)
3017 {
3018         struct device_node *np = NULL;
3019         int j, max, i = 0;
3020         int cell_indices[EMAC_BOOT_LIST_SIZE];
3021
3022         /* Collect EMACs */
3023         while((np = of_find_all_nodes(np)) != NULL) {
3024                 const u32 *idx;
3025
3026                 if (of_match_node(emac_match, np) == NULL)
3027                         continue;
3028                 if (of_get_property(np, "unused", NULL))
3029                         continue;
3030                 idx = of_get_property(np, "cell-index", NULL);
3031                 if (idx == NULL)
3032                         continue;
3033                 cell_indices[i] = *idx;
3034                 emac_boot_list[i++] = of_node_get(np);
3035                 if (i >= EMAC_BOOT_LIST_SIZE) {
3036                         of_node_put(np);
3037                         break;
3038                 }
3039         }
3040         max = i;
3041
3042         /* Bubble sort them (doh, what a creative algorithm :-) */
3043         for (i = 0; max > 1 && (i < (max - 1)); i++)
3044                 for (j = i; j < max; j++) {
3045                         if (cell_indices[i] > cell_indices[j]) {
3046                                 swap(emac_boot_list[i], emac_boot_list[j]);
3047                                 swap(cell_indices[i], cell_indices[j]);
3048                         }
3049                 }
3050 }
3051
3052 static int __init emac_init(void)
3053 {
3054         int rc;
3055
3056         printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3057
3058         /* Init debug stuff */
3059         emac_init_debug();
3060
3061         /* Build EMAC boot list */
3062         emac_make_bootlist();
3063
3064         /* Init submodules */
3065         rc = mal_init();
3066         if (rc)
3067                 goto err;
3068         rc = zmii_init();
3069         if (rc)
3070                 goto err_mal;
3071         rc = rgmii_init();
3072         if (rc)
3073                 goto err_zmii;
3074         rc = tah_init();
3075         if (rc)
3076                 goto err_rgmii;
3077         rc = platform_driver_register(&emac_driver);
3078         if (rc)
3079                 goto err_tah;
3080
3081         return 0;
3082
3083  err_tah:
3084         tah_exit();
3085  err_rgmii:
3086         rgmii_exit();
3087  err_zmii:
3088         zmii_exit();
3089  err_mal:
3090         mal_exit();
3091  err:
3092         return rc;
3093 }
3094
3095 static void __exit emac_exit(void)
3096 {
3097         int i;
3098
3099         platform_driver_unregister(&emac_driver);
3100
3101         tah_exit();
3102         rgmii_exit();
3103         zmii_exit();
3104         mal_exit();
3105         emac_fini_debug();
3106
3107         /* Destroy EMAC boot list */
3108         for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3109                 of_node_put(emac_boot_list[i]);
3110 }
3111
/* Standard module entry/exit hooks */
module_init(emac_init);
module_exit(emac_exit);