/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/clk.h>
24 #include <linux/etherdevice.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/ethtool.h>
28 #include <linux/crc32.h>
29 #include <linux/err.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/platform_device.h>
32 #include <linux/if_vlan.h>
34 #include <bcm63xx_dev_enet.h>
35 #include "bcm63xx_enet.h"
37 static char bcm_enet_driver_name[] = "bcm63xx_enet";
38 static char bcm_enet_driver_version[] = "1.0";
40 static int copybreak __read_mostly = 128;
41 module_param(copybreak, int, 0);
42 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
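/* frames shorter than copybreak bytes are copied into a fresh, small
 * skb on receive so the original rx buffer can be recycled in place;
 * longer frames are handed upstream directly and their buffer is
 * remapped (see bcm_enet_receive_queue) */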
44 /* io registers memory shared between all devices */
45 static void __iomem *bcm_enet_shared_base[3];
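/* as used by the accessors below: [0] is the global dma controller
 * block, [1] the per-channel configuration registers, [2] the
 * per-channel state ram */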
48 * io helpers to access mac registers
50 static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
52 return bcm_readl(priv->base + off);
55 static inline void enet_writel(struct bcm_enet_priv *priv,
58 bcm_writel(val, priv->base + off);
62 * io helpers to access switch registers
64 static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
66 return bcm_readl(priv->base + off);
69 static inline void enetsw_writel(struct bcm_enet_priv *priv,
72 bcm_writel(val, priv->base + off);
75 static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
77 return bcm_readw(priv->base + off);
80 static inline void enetsw_writew(struct bcm_enet_priv *priv,
83 bcm_writew(val, priv->base + off);
86 static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
88 return bcm_readb(priv->base + off);
91 static inline void enetsw_writeb(struct bcm_enet_priv *priv,
94 bcm_writeb(val, priv->base + off);
98 /* io helpers to access shared registers */
99 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
101 return bcm_readl(bcm_enet_shared_base[0] + off);
104 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
107 bcm_writel(val, bcm_enet_shared_base[0] + off);
110 static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
112 return bcm_readl(bcm_enet_shared_base[1] +
113 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
116 static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
117 u32 val, u32 off, int chan)
119 bcm_writel(val, bcm_enet_shared_base[1] +
120 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
123 static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
125 return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
128 static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
129 u32 val, u32 off, int chan)
131 bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
/*
 * write given data into mii register and wait for transfer to end,
 * with timeout (average measured transfer time is 25us); returns 0 on
 * completion, 1 on timeout
 */
138 static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
142 /* make sure mii interrupt status is cleared */
143 enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
145 enet_writel(priv, data, ENET_MIIDATA_REG);
	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}
160 * MII internal read callback
162 static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
167 tmp = regnum << ENET_MIIDATA_REG_SHIFT;
168 tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
169 tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
170 tmp |= ENET_MIIDATA_OP_READ_MASK;
	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	return val & 0xffff;
}
181 * MII internal write callback
183 static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
184 int regnum, u16 value)
188 tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
189 tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
190 tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
191 tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
192 tmp |= ENET_MIIDATA_OP_WRITE_MASK;
	(void)do_mdio_op(priv, tmp);
	return 0;
}
199 * MII read callback from phylib
201 static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
204 return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
208 * MII write callback from phylib
210 static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
211 int regnum, u16 value)
213 return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
217 * MII read callback from mii core
219 static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
222 return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
226 * MII write callback from mii core
228 static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
229 int regnum, int value)
231 bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
237 static int bcm_enet_refill_rx(struct net_device *dev)
239 struct bcm_enet_priv *priv;
241 priv = netdev_priv(dev);
243 while (priv->rx_desc_count < priv->rx_ring_size) {
244 struct bcm_enet_desc *desc;
250 desc_idx = priv->rx_dirty_desc;
251 desc = &priv->rx_desc_cpu[desc_idx];
253 if (!priv->rx_skb[desc_idx]) {
254 skb = netdev_alloc_skb(dev, priv->rx_skb_size);
257 priv->rx_skb[desc_idx] = skb;
258 p = dma_map_single(&priv->pdev->dev, skb->data,
264 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
265 len_stat |= DMADESC_OWNER_MASK;
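		/* the descriptor that closes the ring gets the WRAP bit,
		 * so the dma engine goes back to descriptor 0 after it */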
266 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
267 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
268 priv->rx_dirty_desc = 0;
270 priv->rx_dirty_desc++;
273 desc->len_stat = len_stat;
275 priv->rx_desc_count++;
277 /* tell dma engine we allocated one buffer */
278 if (priv->dma_has_sram)
279 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
281 enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
284 /* If rx ring is still empty, set a timer to try allocating
285 * again at a later time. */
286 if (priv->rx_desc_count == 0 && netif_running(dev)) {
287 dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
288 priv->rx_timeout.expires = jiffies + HZ;
289 add_timer(&priv->rx_timeout);
/*
 * timer callback to defer refilling the rx queue in case we're OOM
 */
298 static void bcm_enet_refill_rx_timer(unsigned long data)
300 struct net_device *dev;
301 struct bcm_enet_priv *priv;
303 dev = (struct net_device *)data;
304 priv = netdev_priv(dev);
	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}
312 * extract packet from rx queue
314 static int bcm_enet_receive_queue(struct net_device *dev, int budget)
316 struct bcm_enet_priv *priv;
320 priv = netdev_priv(dev);
321 kdev = &priv->pdev->dev;
	/* don't scan the ring further than the number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;
330 struct bcm_enet_desc *desc;
336 desc_idx = priv->rx_curr_desc;
337 desc = &priv->rx_desc_cpu[desc_idx];
		/* make sure we actually read the descriptor status at
		 * each loop iteration */
		rmb();

		len_stat = desc->len_stat;
345 /* break if dma ownership belongs to hw */
346 if (len_stat & DMADESC_OWNER_MASK)
350 priv->rx_curr_desc++;
351 if (priv->rx_curr_desc == priv->rx_ring_size)
352 priv->rx_curr_desc = 0;
353 priv->rx_desc_count--;
355 /* if the packet does not have start of packet _and_
356 * end of packet flag set, then just recycle it */
357 if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
358 (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
359 dev->stats.rx_dropped++;
363 /* recycle packet if it's marked as bad */
364 if (!priv->enet_is_sw &&
365 unlikely(len_stat & DMADESC_ERR_MASK)) {
366 dev->stats.rx_errors++;
368 if (len_stat & DMADESC_OVSIZE_MASK)
369 dev->stats.rx_length_errors++;
370 if (len_stat & DMADESC_CRC_MASK)
371 dev->stats.rx_crc_errors++;
372 if (len_stat & DMADESC_UNDER_MASK)
373 dev->stats.rx_frame_errors++;
374 if (len_stat & DMADESC_OV_MASK)
375 dev->stats.rx_fifo_errors++;
380 skb = priv->rx_skb[desc_idx];
381 len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= ETH_FCS_LEN;
		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = napi_alloc_skb(&priv->napi, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
409 dev->stats.rx_packets++;
410 dev->stats.rx_bytes += len;
411 netif_receive_skb(skb);
413 } while (--budget > 0);
415 if (processed || !priv->rx_desc_count) {
416 bcm_enet_refill_rx(dev);
419 enet_dmac_writel(priv, priv->dma_chan_en_mask,
420 ENETDMAC_CHANCFG, priv->rx_chan);
 * try to reclaim transmitted buffers, forcing reclaim of all of them
 * when the device is being stopped
430 static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
432 struct bcm_enet_priv *priv;
435 priv = netdev_priv(dev);
438 while (priv->tx_desc_count < priv->tx_ring_size) {
439 struct bcm_enet_desc *desc;
442 /* We run in a bh and fight against start_xmit, which
443 * is called with bh disabled */
444 spin_lock(&priv->tx_lock);
446 desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
448 if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
449 spin_unlock(&priv->tx_lock);
		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);
462 priv->tx_dirty_desc++;
463 if (priv->tx_dirty_desc == priv->tx_ring_size)
464 priv->tx_dirty_desc = 0;
465 priv->tx_desc_count++;
467 spin_unlock(&priv->tx_lock);
		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}
483 * poll func, called by network core
485 static int bcm_enet_poll(struct napi_struct *napi, int budget)
487 struct bcm_enet_priv *priv;
488 struct net_device *dev;
491 priv = container_of(napi, struct bcm_enet_priv, napi);
495 enet_dmac_writel(priv, priv->dma_chan_int_mask,
496 ENETDMAC_IR, priv->rx_chan);
497 enet_dmac_writel(priv, priv->dma_chan_int_mask,
498 ENETDMAC_IR, priv->tx_chan);
500 /* reclaim sent skb */
501 bcm_enet_tx_reclaim(dev, 0);
503 spin_lock(&priv->rx_lock);
504 rx_work_done = bcm_enet_receive_queue(dev, budget);
505 spin_unlock(&priv->rx_lock);
507 if (rx_work_done >= budget) {
508 /* rx queue is not yet empty/clean */
512 /* no more packet in rx/tx queue, remove device from poll
516 /* restore rx/tx interrupt */
517 enet_dmac_writel(priv, priv->dma_chan_int_mask,
518 ENETDMAC_IRMASK, priv->rx_chan);
519 enet_dmac_writel(priv, priv->dma_chan_int_mask,
520 ENETDMAC_IRMASK, priv->tx_chan);
526 * mac interrupt handler
528 static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
530 struct net_device *dev;
531 struct bcm_enet_priv *priv;
535 priv = netdev_priv(dev);
537 stat = enet_readl(priv, ENET_IR_REG);
538 if (!(stat & ENET_IR_MIB))
541 /* clear & mask interrupt */
542 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
543 enet_writel(priv, 0, ENET_IRMASK_REG);
545 /* read mib registers in workqueue */
546 schedule_work(&priv->mib_update_task);
552 * rx/tx dma interrupt handler
554 static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
556 struct net_device *dev;
557 struct bcm_enet_priv *priv;
560 priv = netdev_priv(dev);
562 /* mask rx/tx interrupts */
563 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
564 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
566 napi_schedule(&priv->napi);
572 * tx request callback
575 bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
577 struct bcm_enet_priv *priv;
578 struct bcm_enet_desc *desc;
582 priv = netdev_priv(dev);
584 /* lock against tx reclaim */
585 spin_lock(&priv->tx_lock);
	/* make sure the tx hw queue is not full; this should never
	 * happen since we stop the queue before it fills up */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}
597 /* pad small packets sent on a switch device */
598 if (priv->enet_is_sw && skb->len < 64) {
599 int needed = 64 - skb->len;
602 if (unlikely(skb_tailroom(skb) < needed)) {
603 struct sk_buff *nskb;
605 nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
607 ret = NETDEV_TX_BUSY;
613 data = skb_put(skb, needed);
614 memset(data, 0, needed);
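		/* the switch expects frames of at least 64 bytes on the
		 * wire and apparently does not pad them itself, hence the
		 * manual zero padding above */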
617 /* point to the next available desc */
618 desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
619 priv->tx_skb[priv->tx_curr_desc] = skb;
621 /* fill descriptor */
622 desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
625 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
626 len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
630 priv->tx_curr_desc++;
631 if (priv->tx_curr_desc == priv->tx_ring_size) {
632 priv->tx_curr_desc = 0;
633 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
635 priv->tx_desc_count--;
	/* dma might already be polling, make sure we update desc
	 * fields in the correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();
644 enet_dmac_writel(priv, priv->dma_chan_en_mask,
645 ENETDMAC_CHANCFG, priv->tx_chan);
647 /* stop queue if no more desc available */
648 if (!priv->tx_desc_count)
649 netif_stop_queue(dev);
651 dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
661 * Change the interface's mac address.
663 static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
665 struct bcm_enet_priv *priv;
666 struct sockaddr *addr = p;
669 priv = netdev_priv(dev);
670 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
672 /* use perfect match register 0 to store my mac address */
673 val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
674 (dev->dev_addr[4] << 8) | dev->dev_addr[5];
675 enet_writel(priv, val, ENET_PML_REG(0));
677 val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
678 val |= ENET_PMH_DATAVALID_MASK;
679 enet_writel(priv, val, ENET_PMH_REG(0));
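	/* the mac address is split across two registers: the low four
	 * bytes (dev_addr[2..5]) live in PML, the high two bytes plus
	 * the DATAVALID bit in PMH */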
685 * Change rx mode (promiscuous/allmulti) and update multicast list
687 static void bcm_enet_set_multicast_list(struct net_device *dev)
689 struct bcm_enet_priv *priv;
690 struct netdev_hw_addr *ha;
694 priv = netdev_priv(dev);
696 val = enet_readl(priv, ENET_RXCFG_REG);
698 if (dev->flags & IFF_PROMISC)
699 val |= ENET_RXCFG_PROMISC_MASK;
701 val &= ~ENET_RXCFG_PROMISC_MASK;
	/* only 3 perfect match registers left, the first one is used
	 * for our own unicast address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
706 val |= ENET_RXCFG_ALLMCAST_MASK;
708 val &= ~ENET_RXCFG_ALLMCAST_MASK;
	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}
718 netdev_for_each_mc_addr(ha, dev) {
		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
727 (dmi_addr[4] << 8) | dmi_addr[5];
728 enet_writel(priv, tmp, ENET_PML_REG(i + 1));
730 tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
731 tmp |= ENET_PMH_DATAVALID_MASK;
732 enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	/* clear the remaining perfect-match slots, left over from a
	 * previous longer multicast list */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}
740 enet_writel(priv, val, ENET_RXCFG_REG);
744 * set mac duplex parameters
746 static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
750 val = enet_readl(priv, ENET_TXCTL_REG);
752 val |= ENET_TXCTL_FD_MASK;
754 val &= ~ENET_TXCTL_FD_MASK;
755 enet_writel(priv, val, ENET_TXCTL_REG);
759 * set mac flow control parameters
761 static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
765 /* rx flow control (pause frame handling) */
766 val = enet_readl(priv, ENET_RXCFG_REG);
768 val |= ENET_RXCFG_ENFLOW_MASK;
770 val &= ~ENET_RXCFG_ENFLOW_MASK;
771 enet_writel(priv, val, ENET_RXCFG_REG);
773 if (!priv->dma_has_sram)
776 /* tx flow control (pause frame generation) */
777 val = enet_dma_readl(priv, ENETDMA_CFG_REG);
779 val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
781 val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
782 enet_dma_writel(priv, val, ENETDMA_CFG_REG);
786 * link changed callback (from phylib)
788 static void bcm_enet_adjust_phy_link(struct net_device *dev)
790 struct bcm_enet_priv *priv;
791 struct phy_device *phydev;
794 priv = netdev_priv(dev);
795 phydev = priv->phydev;
798 if (priv->old_link != phydev->link) {
800 priv->old_link = phydev->link;
803 /* reflect duplex change in mac configuration */
804 if (phydev->link && phydev->duplex != priv->old_duplex) {
805 bcm_enet_set_duplex(priv,
806 (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
808 priv->old_duplex = phydev->duplex;
	/* enable flow control if the remote advertised it (trust phylib
	 * to check that duplex is full) */
813 if (phydev->link && phydev->pause != priv->old_pause) {
814 int rx_pause_en, tx_pause_en;
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
822 rx_pause_en = priv->pause_rx;
823 tx_pause_en = priv->pause_tx;
829 bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
831 priv->old_pause = phydev->pause;
	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");
		pr_cont("\n");
	}
847 * link changed callback (if phylib is not used)
849 static void bcm_enet_adjust_link(struct net_device *dev)
851 struct bcm_enet_priv *priv;
853 priv = netdev_priv(dev);
854 bcm_enet_set_duplex(priv, priv->force_duplex_full);
855 bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
856 netif_carrier_on(dev);
	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
867 * open callback, allocate dma rings & buffers and start rx operation
869 static int bcm_enet_open(struct net_device *dev)
871 struct bcm_enet_priv *priv;
872 struct sockaddr addr;
874 struct phy_device *phydev;
877 char phy_id[MII_BUS_ID_SIZE + 3];
881 priv = netdev_priv(dev);
882 kdev = &priv->pdev->dev;
886 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
887 priv->mii_bus->id, priv->phy_id);
889 phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
890 PHY_INTERFACE_MODE_MII);
892 if (IS_ERR(phydev)) {
893 dev_err(kdev, "could not attach to PHY\n");
894 return PTR_ERR(phydev);
	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_Autoneg |
			      SUPPORTED_Pause |
			      SUPPORTED_MII);
	phydev->advertising = phydev->supported;

	if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
		phydev->advertising |= SUPPORTED_Pause;
	else
		phydev->advertising &= ~SUPPORTED_Pause;
912 dev_info(kdev, "attached PHY at address %d [%s]\n",
913 phydev->addr, phydev->drv->name);
916 priv->old_duplex = -1;
917 priv->old_pause = -1;
918 priv->phydev = phydev;
921 /* mask all interrupts and request them */
922 enet_writel(priv, 0, ENET_IRMASK_REG);
923 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
924 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
926 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
928 goto out_phy_disconnect;
930 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
935 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
940 /* initialize perfect match registers */
941 for (i = 0; i < 4; i++) {
942 enet_writel(priv, 0, ENET_PML_REG(i));
943 enet_writel(priv, 0, ENET_PMH_REG(i));
946 /* write device mac address */
947 memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
948 bcm_enet_set_mac_address(dev, &addr);
950 /* allocate rx dma ring */
951 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
952 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
958 priv->rx_desc_alloc_size = size;
959 priv->rx_desc_cpu = p;
961 /* allocate tx dma ring */
962 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
963 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
966 goto out_free_rx_ring;
969 priv->tx_desc_alloc_size = size;
970 priv->tx_desc_cpu = p;
972 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
976 goto out_free_tx_ring;
979 priv->tx_desc_count = priv->tx_ring_size;
980 priv->tx_dirty_desc = 0;
981 priv->tx_curr_desc = 0;
982 spin_lock_init(&priv->tx_lock);
984 /* init & fill rx ring with skbs */
985 priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
989 goto out_free_tx_skb;
992 priv->rx_desc_count = 0;
993 priv->rx_dirty_desc = 0;
994 priv->rx_curr_desc = 0;
996 /* initialize flow control buffer allocation */
997 if (priv->dma_has_sram)
998 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
999 ENETDMA_BUFALLOC_REG(priv->rx_chan));
1001 enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1002 ENETDMAC_BUFALLOC, priv->rx_chan);
1004 if (bcm_enet_refill_rx(dev)) {
1005 dev_err(kdev, "cannot allocate rx skb queue\n");
1010 /* write rx & tx ring addresses */
1011 if (priv->dma_has_sram) {
1012 enet_dmas_writel(priv, priv->rx_desc_dma,
1013 ENETDMAS_RSTART_REG, priv->rx_chan);
1014 enet_dmas_writel(priv, priv->tx_desc_dma,
1015 ENETDMAS_RSTART_REG, priv->tx_chan);
1017 enet_dmac_writel(priv, priv->rx_desc_dma,
1018 ENETDMAC_RSTART, priv->rx_chan);
1019 enet_dmac_writel(priv, priv->tx_desc_dma,
1020 ENETDMAC_RSTART, priv->tx_chan);
1023 /* clear remaining state ram for rx & tx channel */
1024 if (priv->dma_has_sram) {
1025 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
1026 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
1027 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
1028 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
1029 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1030 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1032 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1033 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1036 /* set max rx/tx length */
1037 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
1038 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
1040 /* set dma maximum burst len */
1041 enet_dmac_writel(priv, priv->dma_maxburst,
1042 ENETDMAC_MAXBURST, priv->rx_chan);
1043 enet_dmac_writel(priv, priv->dma_maxburst,
1044 ENETDMAC_MAXBURST, priv->tx_chan);
1046 /* set correct transmit fifo watermark */
1047 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1049 /* set flow control low/high threshold to 1/3 / 2/3 */
1050 if (priv->dma_has_sram) {
1051 val = priv->rx_ring_size / 3;
1052 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1053 val = (priv->rx_ring_size * 2) / 3;
1054 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
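		/* presumably the hardware asserts pause frames once the
		 * number of free rx buffers drops below the low mark and
		 * releases them again above the high mark */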
1056 enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1057 enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1058 enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1061 /* all set, enable mac and interrupts, start dma engine and
1062 * kick rx dma channel */
1064 val = enet_readl(priv, ENET_CTL_REG);
1065 val |= ENET_CTL_ENABLE_MASK;
1066 enet_writel(priv, val, ENET_CTL_REG);
1067 if (priv->dma_has_sram)
1068 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1069 enet_dmac_writel(priv, priv->dma_chan_en_mask,
1070 ENETDMAC_CHANCFG, priv->rx_chan);
1072 /* watch "mib counters about to overflow" interrupt */
1073 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1074 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1076 /* watch "packet transferred" interrupt in rx and tx */
1077 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1078 ENETDMAC_IR, priv->rx_chan);
1079 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1080 ENETDMAC_IR, priv->tx_chan);
1082 /* make sure we enable napi before rx interrupt */
1083 napi_enable(&priv->napi);
1085 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1086 ENETDMAC_IRMASK, priv->rx_chan);
1087 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1088 ENETDMAC_IRMASK, priv->tx_chan);
	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);
1095 netif_start_queue(dev);
1099 for (i = 0; i < priv->rx_ring_size; i++) {
1100 struct bcm_enet_desc *desc;
1102 if (!priv->rx_skb[i])
1105 desc = &priv->rx_desc_cpu[i];
1106 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1108 kfree_skb(priv->rx_skb[i]);
1110 kfree(priv->rx_skb);
1113 kfree(priv->tx_skb);
1116 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1117 priv->tx_desc_cpu, priv->tx_desc_dma);
1120 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1121 priv->rx_desc_cpu, priv->rx_desc_dma);
1124 free_irq(priv->irq_tx, dev);
1127 free_irq(priv->irq_rx, dev);
1130 free_irq(dev->irq, dev);
1133 phy_disconnect(priv->phydev);
1141 static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1146 val = enet_readl(priv, ENET_CTL_REG);
1147 val |= ENET_CTL_DISABLE_MASK;
1148 enet_writel(priv, val, ENET_CTL_REG);
1154 val = enet_readl(priv, ENET_CTL_REG);
1155 if (!(val & ENET_CTL_DISABLE_MASK))
1162 * disable dma in given channel
1164 static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1168 enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1174 val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1175 if (!(val & ENETDMAC_CHANCFG_EN_MASK))
1184 static int bcm_enet_stop(struct net_device *dev)
1186 struct bcm_enet_priv *priv;
1187 struct device *kdev;
1190 priv = netdev_priv(dev);
1191 kdev = &priv->pdev->dev;
1193 netif_stop_queue(dev);
1194 napi_disable(&priv->napi);
1196 phy_stop(priv->phydev);
1197 del_timer_sync(&priv->rx_timeout);
1199 /* mask all interrupts */
1200 enet_writel(priv, 0, ENET_IRMASK_REG);
1201 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1202 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1204 /* make sure no mib update is scheduled */
1205 cancel_work_sync(&priv->mib_update_task);
1207 /* disable dma & mac */
1208 bcm_enet_disable_dma(priv, priv->tx_chan);
1209 bcm_enet_disable_dma(priv, priv->rx_chan);
1210 bcm_enet_disable_mac(priv);
1212 /* force reclaim of all tx buffers */
1213 bcm_enet_tx_reclaim(dev, 1);
1215 /* free the rx skb ring */
1216 for (i = 0; i < priv->rx_ring_size; i++) {
1217 struct bcm_enet_desc *desc;
1219 if (!priv->rx_skb[i])
1222 desc = &priv->rx_desc_cpu[i];
1223 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1225 kfree_skb(priv->rx_skb[i]);
1228 /* free remaining allocated memory */
1229 kfree(priv->rx_skb);
1230 kfree(priv->tx_skb);
1231 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1232 priv->rx_desc_cpu, priv->rx_desc_dma);
1233 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1234 priv->tx_desc_cpu, priv->tx_desc_dma);
1235 free_irq(priv->irq_tx, dev);
1236 free_irq(priv->irq_rx, dev);
1237 free_irq(dev->irq, dev);
1240 if (priv->has_phy) {
1241 phy_disconnect(priv->phydev);
1242 priv->phydev = NULL;
1251 struct bcm_enet_stats {
1252 char stat_string[ETH_GSTRING_LEN];
1258 #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
1259 offsetof(struct bcm_enet_priv, m)
1260 #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
1261 offsetof(struct net_device_stats, m)
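/* each table entry below carries the string reported by ethtool, the
 * size and offset of the backing counter (in bcm_enet_priv or
 * net_device_stats), and the hardware mib register index, with -1
 * marking a pure software counter */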
1263 static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1264 { "rx_packets", DEV_STAT(rx_packets), -1 },
1265 { "tx_packets", DEV_STAT(tx_packets), -1 },
1266 { "rx_bytes", DEV_STAT(rx_bytes), -1 },
1267 { "tx_bytes", DEV_STAT(tx_bytes), -1 },
1268 { "rx_errors", DEV_STAT(rx_errors), -1 },
1269 { "tx_errors", DEV_STAT(tx_errors), -1 },
1270 { "rx_dropped", DEV_STAT(rx_dropped), -1 },
1271 { "tx_dropped", DEV_STAT(tx_dropped), -1 },
1273 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1274 { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1275 { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1276 { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1277 { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1278 { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1279 { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1280 { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1281 { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1282 { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1283 { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1284 { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1285 { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1286 { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1287 { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1288 { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1289 { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1290 { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1291 { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1292 { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1293 { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1295 { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1296 { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1297 { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1298 { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1299 { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1300 { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1301 { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1302 { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1303 { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1304 { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1305 { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1306 { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1307 { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1308 { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1309 { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1310 { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1311 { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1312 { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1313 { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1314 { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1315 { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1316 { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1320 #define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
1322 static const u32 unused_mib_regs[] = {
1323 ETH_MIB_TX_ALL_OCTETS,
1324 ETH_MIB_TX_ALL_PKTS,
1325 ETH_MIB_RX_ALL_OCTETS,
1326 ETH_MIB_RX_ALL_PKTS,
1330 static void bcm_enet_get_drvinfo(struct net_device *netdev,
1331 struct ethtool_drvinfo *drvinfo)
1333 strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
1334 strlcpy(drvinfo->version, bcm_enet_driver_version,
1335 sizeof(drvinfo->version));
1336 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1337 strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
1340 static int bcm_enet_get_sset_count(struct net_device *netdev,
1343 switch (string_set) {
1345 return BCM_ENET_STATS_LEN;
1351 static void bcm_enet_get_strings(struct net_device *netdev,
1352 u32 stringset, u8 *data)
1356 switch (stringset) {
1358 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1359 memcpy(data + i * ETH_GSTRING_LEN,
1360 bcm_enet_gstrings_stats[i].stat_string,
1367 static void update_mib_counters(struct bcm_enet_priv *priv)
1371 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1372 const struct bcm_enet_stats *s;
1376 s = &bcm_enet_gstrings_stats[i];
1377 if (s->mib_reg == -1)
1380 val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1381 p = (char *)priv + s->stat_offset;
		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}
1389 /* also empty unused mib counters to make sure mib counter
1390 * overflow interrupt is cleared */
1391 for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1392 (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1395 static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1397 struct bcm_enet_priv *priv;
1399 priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1400 mutex_lock(&priv->mib_update_lock);
1401 update_mib_counters(priv);
1402 mutex_unlock(&priv->mib_update_lock);
	/* re-enable mib interrupt */
1405 if (netif_running(priv->net_dev))
1406 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1409 static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1410 struct ethtool_stats *stats,
1413 struct bcm_enet_priv *priv;
1416 priv = netdev_priv(netdev);
1418 mutex_lock(&priv->mib_update_lock);
1419 update_mib_counters(priv);
1421 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1422 const struct bcm_enet_stats *s;
1425 s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;

		p += s->stat_offset;
1431 data[i] = (s->sizeof_stat == sizeof(u64)) ?
1432 *(u64 *)p : *(u32 *)p;
1434 mutex_unlock(&priv->mib_update_lock);
1437 static int bcm_enet_nway_reset(struct net_device *dev)
1439 struct bcm_enet_priv *priv;
1441 priv = netdev_priv(dev);
1442 if (priv->has_phy) {
1445 return genphy_restart_aneg(priv->phydev);
1451 static int bcm_enet_get_settings(struct net_device *dev,
1452 struct ethtool_cmd *cmd)
1454 struct bcm_enet_priv *priv;
1456 priv = netdev_priv(dev);
1461 if (priv->has_phy) {
1464 return phy_ethtool_gset(priv->phydev, cmd);
1467 ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
1468 ? SPEED_100 : SPEED_10));
1469 cmd->duplex = (priv->force_duplex_full) ?
1470 DUPLEX_FULL : DUPLEX_HALF;
1471 cmd->supported = ADVERTISED_10baseT_Half |
1472 ADVERTISED_10baseT_Full |
1473 ADVERTISED_100baseT_Half |
1474 ADVERTISED_100baseT_Full;
1475 cmd->advertising = 0;
1476 cmd->port = PORT_MII;
1477 cmd->transceiver = XCVR_EXTERNAL;
1482 static int bcm_enet_set_settings(struct net_device *dev,
1483 struct ethtool_cmd *cmd)
1485 struct bcm_enet_priv *priv;
1487 priv = netdev_priv(dev);
1488 if (priv->has_phy) {
1491 return phy_ethtool_sset(priv->phydev, cmd);
1495 (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
1496 cmd->port != PORT_MII)
1499 priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
1500 priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
1502 if (netif_running(dev))
1503 bcm_enet_adjust_link(dev);
1508 static void bcm_enet_get_ringparam(struct net_device *dev,
1509 struct ethtool_ringparam *ering)
1511 struct bcm_enet_priv *priv;
1513 priv = netdev_priv(dev);
1515 /* rx/tx ring is actually only limited by memory */
1516 ering->rx_max_pending = 8192;
1517 ering->tx_max_pending = 8192;
1518 ering->rx_pending = priv->rx_ring_size;
1519 ering->tx_pending = priv->tx_ring_size;
1522 static int bcm_enet_set_ringparam(struct net_device *dev,
1523 struct ethtool_ringparam *ering)
1525 struct bcm_enet_priv *priv;
1528 priv = netdev_priv(dev);
1531 if (netif_running(dev)) {
1536 priv->rx_ring_size = ering->rx_pending;
1537 priv->tx_ring_size = ering->tx_pending;
1542 err = bcm_enet_open(dev);
1546 bcm_enet_set_multicast_list(dev);
1551 static void bcm_enet_get_pauseparam(struct net_device *dev,
1552 struct ethtool_pauseparam *ecmd)
1554 struct bcm_enet_priv *priv;
1556 priv = netdev_priv(dev);
1557 ecmd->autoneg = priv->pause_auto;
1558 ecmd->rx_pause = priv->pause_rx;
1559 ecmd->tx_pause = priv->pause_tx;
1562 static int bcm_enet_set_pauseparam(struct net_device *dev,
1563 struct ethtool_pauseparam *ecmd)
1565 struct bcm_enet_priv *priv;
1567 priv = netdev_priv(dev);
1569 if (priv->has_phy) {
1570 if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported; actually
			 * possible but the integrated PHY has a read-only
			 * asym_pause bit */
1577 /* no pause autoneg on direct mii connection */
1582 priv->pause_auto = ecmd->autoneg;
1583 priv->pause_rx = ecmd->rx_pause;
1584 priv->pause_tx = ecmd->tx_pause;
1589 static const struct ethtool_ops bcm_enet_ethtool_ops = {
1590 .get_strings = bcm_enet_get_strings,
1591 .get_sset_count = bcm_enet_get_sset_count,
1592 .get_ethtool_stats = bcm_enet_get_ethtool_stats,
1593 .nway_reset = bcm_enet_nway_reset,
1594 .get_settings = bcm_enet_get_settings,
1595 .set_settings = bcm_enet_set_settings,
1596 .get_drvinfo = bcm_enet_get_drvinfo,
1597 .get_link = ethtool_op_get_link,
1598 .get_ringparam = bcm_enet_get_ringparam,
1599 .set_ringparam = bcm_enet_set_ringparam,
1600 .get_pauseparam = bcm_enet_get_pauseparam,
1601 .set_pauseparam = bcm_enet_set_pauseparam,
1604 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1606 struct bcm_enet_priv *priv;
1608 priv = netdev_priv(dev);
1609 if (priv->has_phy) {
1612 return phy_mii_ioctl(priv->phydev, rq, cmd);
1614 struct mii_if_info mii;
1617 mii.mdio_read = bcm_enet_mdio_read_mii;
1618 mii.mdio_write = bcm_enet_mdio_write_mii;
1620 mii.phy_id_mask = 0x3f;
1621 mii.reg_num_mask = 0x1f;
1622 return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1627 * calculate actual hardware mtu
1629 static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
1635 /* add ethernet header + vlan tag size */
1636 actual_mtu += VLAN_ETH_HLEN;
1638 if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
	/*
	 * setup maximum size before we get the overflow mark in the
	 * descriptor; note that this will not prevent reception of big
	 * frames, they will just be split into multiple buffers
	 */
	priv->hw_mtu = actual_mtu;
	/*
	 * align rx buffer size to dma burst len, account for FCS since
	 * the controller writes it into the buffer
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);
1659 * adjust mtu, can't be called while device is running
1661 static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1665 if (netif_running(dev))
1668 ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
1676 * preinit hardware to allow mii operation while device is down
1678 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1683 /* make sure mac is disabled */
1684 bcm_enet_disable_mac(priv);
1686 /* soft reset mac */
1687 val = ENET_CTL_SRESET_MASK;
1688 enet_writel(priv, val, ENET_CTL_REG);
1693 val = enet_readl(priv, ENET_CTL_REG);
1694 if (!(val & ENET_CTL_SRESET_MASK))
1699 /* select correct mii interface */
1700 val = enet_readl(priv, ENET_CTL_REG);
1701 if (priv->use_external_mii)
1702 val |= ENET_CTL_EPHYSEL_MASK;
1704 val &= ~ENET_CTL_EPHYSEL_MASK;
1705 enet_writel(priv, val, ENET_CTL_REG);
1707 /* turn on mdc clock */
1708 enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1709 ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1711 /* set mib counters to self-clear when read */
1712 val = enet_readl(priv, ENET_MIBCTL_REG);
1713 val |= ENET_MIBCTL_RDCLEAR_MASK;
1714 enet_writel(priv, val, ENET_MIBCTL_REG);
1717 static const struct net_device_ops bcm_enet_ops = {
1718 .ndo_open = bcm_enet_open,
1719 .ndo_stop = bcm_enet_stop,
1720 .ndo_start_xmit = bcm_enet_start_xmit,
1721 .ndo_set_mac_address = bcm_enet_set_mac_address,
1722 .ndo_set_rx_mode = bcm_enet_set_multicast_list,
1723 .ndo_do_ioctl = bcm_enet_ioctl,
1724 .ndo_change_mtu = bcm_enet_change_mtu,
1728 * allocate netdevice, request register memory and register device.
1730 static int bcm_enet_probe(struct platform_device *pdev)
1732 struct bcm_enet_priv *priv;
1733 struct net_device *dev;
1734 struct bcm63xx_enet_platform_data *pd;
1735 struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1736 struct mii_bus *bus;
1737 const char *clk_name;
1740 /* stop if shared driver failed, assume driver->probe will be
1741 * called in the same order we register devices (correct ?) */
1742 if (!bcm_enet_shared_base[0])
1745 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1746 res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1747 res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1748 if (!res_irq || !res_irq_rx || !res_irq_tx)
1752 dev = alloc_etherdev(sizeof(*priv));
1755 priv = netdev_priv(dev);
1757 priv->enet_is_sw = false;
1758 priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1760 ret = compute_hw_mtu(priv, dev->mtu);
1764 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1765 priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
1766 if (IS_ERR(priv->base)) {
1767 ret = PTR_ERR(priv->base);
1771 dev->irq = priv->irq = res_irq->start;
1772 priv->irq_rx = res_irq_rx->start;
1773 priv->irq_tx = res_irq_tx->start;
1774 priv->mac_id = pdev->id;
1776 /* get rx & tx dma channel id for this mac */
1777 if (priv->mac_id == 0) {
1787 priv->mac_clk = clk_get(&pdev->dev, clk_name);
1788 if (IS_ERR(priv->mac_clk)) {
1789 ret = PTR_ERR(priv->mac_clk);
1792 ret = clk_prepare_enable(priv->mac_clk);
1794 goto out_put_clk_mac;
1796 /* initialize default and fetch platform data */
1797 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1798 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1800 pd = dev_get_platdata(&pdev->dev);
1802 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1803 priv->has_phy = pd->has_phy;
1804 priv->phy_id = pd->phy_id;
1805 priv->has_phy_interrupt = pd->has_phy_interrupt;
1806 priv->phy_interrupt = pd->phy_interrupt;
1807 priv->use_external_mii = !pd->use_internal_phy;
1808 priv->pause_auto = pd->pause_auto;
1809 priv->pause_rx = pd->pause_rx;
1810 priv->pause_tx = pd->pause_tx;
1811 priv->force_duplex_full = pd->force_duplex_full;
1812 priv->force_speed_100 = pd->force_speed_100;
1813 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1814 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1815 priv->dma_chan_width = pd->dma_chan_width;
1816 priv->dma_has_sram = pd->dma_has_sram;
1817 priv->dma_desc_shift = pd->dma_desc_shift;
1820 if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1821 /* using internal PHY, enable clock */
1822 priv->phy_clk = clk_get(&pdev->dev, "ephy");
1823 if (IS_ERR(priv->phy_clk)) {
1824 ret = PTR_ERR(priv->phy_clk);
1825 priv->phy_clk = NULL;
1826 goto out_disable_clk_mac;
1828 ret = clk_prepare_enable(priv->phy_clk);
1830 goto out_put_clk_phy;
1833 /* do minimal hardware init to be able to probe mii bus */
1834 bcm_enet_hw_preinit(priv);
1836 /* MII bus registration */
1837 if (priv->has_phy) {
1839 priv->mii_bus = mdiobus_alloc();
1840 if (!priv->mii_bus) {
1845 bus = priv->mii_bus;
1846 bus->name = "bcm63xx_enet MII bus";
1847 bus->parent = &pdev->dev;
1849 bus->read = bcm_enet_mdio_read_phylib;
1850 bus->write = bcm_enet_mdio_write_phylib;
1851 sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
		/* only probe the bus where we think the PHY is, because
		 * the mdio read operation returns 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);
1858 bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
1865 if (priv->has_phy_interrupt)
1866 bus->irq[priv->phy_id] = priv->phy_interrupt;
1868 bus->irq[priv->phy_id] = PHY_POLL;
1870 ret = mdiobus_register(bus);
1872 dev_err(&pdev->dev, "unable to register mdio bus\n");
1877 /* run platform code to initialize PHY device */
1878 if (pd->mii_config &&
1879 pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1880 bcm_enet_mdio_write_mii)) {
1881 dev_err(&pdev->dev, "unable to configure mdio bus\n");
1886 spin_lock_init(&priv->rx_lock);
1888 /* init rx timeout (used for oom) */
1889 init_timer(&priv->rx_timeout);
1890 priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1891 priv->rx_timeout.data = (unsigned long)dev;
1893 /* init the mib update lock&work */
1894 mutex_init(&priv->mib_update_lock);
1895 INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1897 /* zero mib counters */
1898 for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1899 enet_writel(priv, 0, ENET_MIB_REG(i));
1901 /* register netdevice */
1902 dev->netdev_ops = &bcm_enet_ops;
1903 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1905 dev->ethtool_ops = &bcm_enet_ethtool_ops;
1906 SET_NETDEV_DEV(dev, &pdev->dev);
1908 ret = register_netdev(dev);
1910 goto out_unregister_mdio;
1912 netif_carrier_off(dev);
1913 platform_set_drvdata(pdev, dev);
1915 priv->net_dev = dev;
1919 out_unregister_mdio:
1921 mdiobus_unregister(priv->mii_bus);
1925 mdiobus_free(priv->mii_bus);
1928 /* turn off mdc clock */
1929 enet_writel(priv, 0, ENET_MIISC_REG);
1931 clk_disable_unprepare(priv->phy_clk);
1935 clk_put(priv->phy_clk);
1937 out_disable_clk_mac:
1938 clk_disable_unprepare(priv->mac_clk);
1940 clk_put(priv->mac_clk);
1948 * exit func, stops hardware and unregisters netdevice
1950 static int bcm_enet_remove(struct platform_device *pdev)
1952 struct bcm_enet_priv *priv;
1953 struct net_device *dev;
1955 /* stop netdevice */
1956 dev = platform_get_drvdata(pdev);
1957 priv = netdev_priv(dev);
1958 unregister_netdev(dev);
1960 /* turn off mdc clock */
1961 enet_writel(priv, 0, ENET_MIISC_REG);
1963 if (priv->has_phy) {
1964 mdiobus_unregister(priv->mii_bus);
1965 mdiobus_free(priv->mii_bus);
1967 struct bcm63xx_enet_platform_data *pd;
1969 pd = dev_get_platdata(&pdev->dev);
1970 if (pd && pd->mii_config)
1971 pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1972 bcm_enet_mdio_write_mii);
1975 /* disable hw block clocks */
1976 if (priv->phy_clk) {
1977 clk_disable_unprepare(priv->phy_clk);
1978 clk_put(priv->phy_clk);
1980 clk_disable_unprepare(priv->mac_clk);
1981 clk_put(priv->mac_clk);
1987 struct platform_driver bcm63xx_enet_driver = {
1988 .probe = bcm_enet_probe,
1989 .remove = bcm_enet_remove,
1991 .name = "bcm63xx_enet",
1992 .owner = THIS_MODULE,
1997 * switch mii access callbacks
1999 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
2000 int ext, int phy_id, int location)
2005 spin_lock_bh(&priv->enetsw_mdio_lock);
2006 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
2008 reg = ENETSW_MDIOC_RD_MASK |
2009 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
2010 (location << ENETSW_MDIOC_REG_SHIFT);
2013 reg |= ENETSW_MDIOC_EXT_MASK;
2015 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2017 ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
2018 spin_unlock_bh(&priv->enetsw_mdio_lock);
2022 static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
2023 int ext, int phy_id, int location,
2028 spin_lock_bh(&priv->enetsw_mdio_lock);
2029 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
2031 reg = ENETSW_MDIOC_WR_MASK |
2032 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
2033 (location << ENETSW_MDIOC_REG_SHIFT);
2036 reg |= ENETSW_MDIOC_EXT_MASK;
2040 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
2042 spin_unlock_bh(&priv->enetsw_mdio_lock);
2045 static inline int bcm_enet_port_is_rgmii(int portid)
2047 return portid >= ENETSW_RGMII_PORT0;
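/* ports from ENETSW_RGMII_PORT0 upwards are wired to external RGMII
 * phys; lower port numbers use the switch's integrated phys */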
2051 * enet sw PHY polling
2053 static void swphy_poll_timer(unsigned long data)
2055 struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
2058 for (i = 0; i < priv->num_ports; i++) {
2059 struct bcm63xx_enetsw_port *port;
2060 int val, j, up, advertise, lpa, speed, duplex, media;
2061 int external_phy = bcm_enet_port_is_rgmii(i);
2064 port = &priv->used_ports[i];
2068 if (port->bypass_link)
		/* BMSR link status is latched low; do a dummy read so the
		 * second read reflects the current state */
2072 for (j = 0; j < 2; j++)
2073 val = bcmenet_sw_mdio_read(priv, external_phy,
2074 port->phy_id, MII_BMSR);
2079 up = (val & BMSR_LSTATUS) ? 1 : 0;
2080 if (!(up ^ priv->sw_port_link[i]))
2083 priv->sw_port_link[i] = up;
2087 dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2089 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2090 ENETSW_PORTOV_REG(i));
2091 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2092 ENETSW_PTCTRL_TXDIS_MASK,
2093 ENETSW_PTCTRL_REG(i));
2097 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2098 port->phy_id, MII_ADVERTISE);
2100 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2103 /* figure out media and duplex from advertise and LPA values */
2104 media = mii_nway_result(lpa & advertise);
2105 duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2107 if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2112 if (val & BMSR_ESTATEN) {
2113 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2114 port->phy_id, MII_CTRL1000);
2116 lpa = bcmenet_sw_mdio_read(priv, external_phy,
2117 port->phy_id, MII_STAT1000);
			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
			    && lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				duplex = (lpa & LPA_1000FULL);
			}
2126 dev_info(&priv->pdev->dev,
2127 "link UP on %s, %dMbps, %s-duplex\n",
2128 port->name, speed, duplex ? "full" : "half");
2130 override = ENETSW_PORTOV_ENABLE_MASK |
2131 ENETSW_PORTOV_LINKUP_MASK;
2134 override |= ENETSW_IMPOV_1000_MASK;
2135 else if (speed == 100)
2136 override |= ENETSW_IMPOV_100_MASK;
2138 override |= ENETSW_IMPOV_FDX_MASK;
2140 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2141 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2144 priv->swphy_poll.expires = jiffies + HZ;
2145 add_timer(&priv->swphy_poll);
2149 * open callback, allocate dma rings & buffers and start rx operation
2151 static int bcm_enetsw_open(struct net_device *dev)
2153 struct bcm_enet_priv *priv;
2154 struct device *kdev;
2160 priv = netdev_priv(dev);
2161 kdev = &priv->pdev->dev;
2163 /* mask all interrupts and request them */
2164 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2165 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2167 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2172 if (priv->irq_tx != -1) {
2173 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2176 goto out_freeirq_rx;
2179 /* allocate rx dma ring */
2180 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2181 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2183 dev_err(kdev, "cannot allocate rx ring %u\n", size);
2185 goto out_freeirq_tx;
2189 priv->rx_desc_alloc_size = size;
2190 priv->rx_desc_cpu = p;
2192 /* allocate tx dma ring */
2193 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2194 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2196 dev_err(kdev, "cannot allocate tx ring\n");
2198 goto out_free_rx_ring;
2202 priv->tx_desc_alloc_size = size;
2203 priv->tx_desc_cpu = p;
2205 priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
2207 if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
2210 goto out_free_tx_ring;
2213 priv->tx_desc_count = priv->tx_ring_size;
2214 priv->tx_dirty_desc = 0;
2215 priv->tx_curr_desc = 0;
2216 spin_lock_init(&priv->tx_lock);
2218 /* init & fill rx ring with skbs */
2219 priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
2221 if (!priv->rx_skb) {
2222 dev_err(kdev, "cannot allocate rx skb queue\n");
2224 goto out_free_tx_skb;
2227 priv->rx_desc_count = 0;
2228 priv->rx_dirty_desc = 0;
2229 priv->rx_curr_desc = 0;
2231 /* disable all ports */
2232 for (i = 0; i < priv->num_ports; i++) {
2233 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2234 ENETSW_PORTOV_REG(i));
2235 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2236 ENETSW_PTCTRL_TXDIS_MASK,
2237 ENETSW_PTCTRL_REG(i));
2239 priv->sw_port_link[i] = 0;
	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
2251 /* force CPU port state */
2252 val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2253 val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2254 enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2256 /* enable switch forward engine */
2257 val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2258 val |= ENETSW_SWMODE_FWD_EN_MASK;
2259 enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2261 /* enable jumbo on all ports */
2262 enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2263 enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
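	/* enable jumbo frames on all ports (one enable bit per port,
	 * 0x1ff presumably covering the 8 external ports plus the cpu
	 * port) and raise the maximum accepted frame size to 9728 */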
2265 /* initialize flow control buffer allocation */
2266 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2267 ENETDMA_BUFALLOC_REG(priv->rx_chan));
2269 if (bcm_enet_refill_rx(dev)) {
2270 dev_err(kdev, "cannot allocate rx skb queue\n");
2275 /* write rx & tx ring addresses */
2276 enet_dmas_writel(priv, priv->rx_desc_dma,
2277 ENETDMAS_RSTART_REG, priv->rx_chan);
2278 enet_dmas_writel(priv, priv->tx_desc_dma,
2279 ENETDMAS_RSTART_REG, priv->tx_chan);
2281 /* clear remaining state ram for rx & tx channel */
2282 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2283 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2284 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2285 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2286 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2287 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2289 /* set dma maximum burst len */
2290 enet_dmac_writel(priv, priv->dma_maxburst,
2291 ENETDMAC_MAXBURST, priv->rx_chan);
2292 enet_dmac_writel(priv, priv->dma_maxburst,
2293 ENETDMAC_MAXBURST, priv->tx_chan);
2295 /* set flow control low/high threshold to 1/3 / 2/3 */
2296 val = priv->rx_ring_size / 3;
2297 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2298 val = (priv->rx_ring_size * 2) / 3;
2299 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2301 /* all set, enable mac and interrupts, start dma engine and
2302 * kick rx dma channel
2305 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2306 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2307 ENETDMAC_CHANCFG, priv->rx_chan);
2309 /* watch "packet transferred" interrupt in rx and tx */
2310 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2311 ENETDMAC_IR, priv->rx_chan);
2312 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2313 ENETDMAC_IR, priv->tx_chan);
2315 /* make sure we enable napi before rx interrupt */
2316 napi_enable(&priv->napi);
2318 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2319 ENETDMAC_IRMASK, priv->rx_chan);
2320 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2321 ENETDMAC_IRMASK, priv->tx_chan);
2323 netif_carrier_on(dev);
2324 netif_start_queue(dev);
2326 /* apply override config for bypass_link ports here. */
2327 for (i = 0; i < priv->num_ports; i++) {
2328 struct bcm63xx_enetsw_port *port;
2330 port = &priv->used_ports[i];
2334 if (!port->bypass_link)
2337 override = ENETSW_PORTOV_ENABLE_MASK |
2338 ENETSW_PORTOV_LINKUP_MASK;
		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}
2355 if (port->force_duplex_full)
2356 override |= ENETSW_IMPOV_FDX_MASK;
2359 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2360 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2363 /* start phy polling timer */
2364 init_timer(&priv->swphy_poll);
2365 priv->swphy_poll.function = swphy_poll_timer;
2366 priv->swphy_poll.data = (unsigned long)priv;
2367 priv->swphy_poll.expires = jiffies;
2368 add_timer(&priv->swphy_poll);
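	/* the first poll runs immediately; swphy_poll_timer then re-arms
	 * itself once per second (see the expires update at its end) */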
2372 for (i = 0; i < priv->rx_ring_size; i++) {
2373 struct bcm_enet_desc *desc;
2375 if (!priv->rx_skb[i])
2378 desc = &priv->rx_desc_cpu[i];
2379 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2381 kfree_skb(priv->rx_skb[i]);
2383 kfree(priv->rx_skb);
2386 kfree(priv->tx_skb);
2389 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2390 priv->tx_desc_cpu, priv->tx_desc_dma);
2393 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2394 priv->rx_desc_cpu, priv->rx_desc_dma);
2397 if (priv->irq_tx != -1)
2398 free_irq(priv->irq_tx, dev);
2401 free_irq(priv->irq_rx, dev);
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}
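/* note: interrupts are masked and both dma channels disabled before any
 * buffer is freed above, so neither the hardware nor the napi handler can
 * touch the rings while they are torn down.
 */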
/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}
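/* bcm_enet_port_is_rgmii() reports whether the matched port sits on one
 * of the RGMII pads, i.e. whether its phy is wired externally; ports we
 * cannot match at all are treated as external so MDIO access still goes
 * out on the external bus.
 */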
/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}
/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location, int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}
static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}
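/* generic_mii_ioctl() implements SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG on
 * top of the two accessors above, so user space tools like mii-tool can
 * reach the switch phys without the driver attaching a phy_device.
 */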
static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_do_ioctl		= bcm_enetsw_ioctl,
};
static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped",	GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
};
#define BCM_ENETSW_STATS_LEN	\
	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enetsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}
static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	/* first pass: snapshot the hardware MIB counters into priv */
	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

	/* second pass: copy software and snapshotted stats out to ethtool */
	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}
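/* note: 64-bit MIB counters are spread over two consecutive 32-bit MIB
 * registers (low word first), read back to back without a hardware latch;
 * a counter wrap between the two reads can momentarily skew a sample,
 * which is acceptable for ethtool-grade statistics.
 */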
static void bcm_enetsw_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}
static int bcm_enetsw_set_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}
	return 0;
}
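/* ring sizes only take effect across a stop/open cycle because the
 * descriptor rings and skb arrays are allocated in bcm_enetsw_open();
 * if the reopen fails there is little to do beyond dev_close().
 */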
static struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings		= bcm_enetsw_get_strings,
	.get_sset_count		= bcm_enetsw_get_sset_count,
	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
	.get_drvinfo		= bcm_enetsw_get_drvinfo,
	.get_ringparam		= bcm_enetsw_get_ringparam,
	.set_ringparam		= bcm_enetsw_set_ringparam,
};
/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	/* stop if shared driver failed, assume driver->probe will be
	 * called in the same order we register devices (correct ?)
	 */
	if (!bcm_enet_shared_base[0])
		return -ENODEV;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(*priv));
	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}
	ret = compute_hw_mtu(priv, dev->mtu);
	if (ret)
		goto out;

	if (!request_mem_region(res_mem->start, resource_size(res_mem),
				"bcm63xx_enetsw")) {
		ret = -EBUSY;
		goto out;
	}

	priv->base = ioremap(res_mem->start, resource_size(res_mem));
	if (priv->base == NULL) {
		ret = -ENOMEM;
		goto out_release_mem;
	}

	priv->mac_clk = clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out_unmap;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out_put_clk;

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	init_timer(&priv->rx_timeout);
	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
	priv->rx_timeout.data = (unsigned long)dev;

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

	/* error path: release resources in reverse order of acquisition */
out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);

out_put_clk:
	clk_put(priv->mac_clk);

out_unmap:
	iounmap(priv->base);

out_release_mem:
	release_mem_region(res_mem->start, resource_size(res_mem));
out:
	free_netdev(dev);
	return ret;
}
/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct resource *res;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* release device resources */
	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	clk_disable_unprepare(priv->mac_clk);
	clk_put(priv->mac_clk);

	free_netdev(dev);
	return 0;
}
struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
		.owner	= THIS_MODULE,
	},
};
/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		p[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}
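/* devm_ioremap_resource() validates the resource and registers the
 * mapping for automatic release, so partial mappings need no manual
 * unwind when one of the three regions fails to map.
 */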
static int bcm_enet_shared_remove(struct platform_device *pdev)
{
	return 0;
}
2892 /* this "shared" driver is needed because both macs share a single
2895 struct platform_driver bcm63xx_enet_shared_driver = {
2896 .probe = bcm_enet_shared_probe,
2897 .remove = bcm_enet_shared_remove,
2899 .name = "bcm63xx_enet_shared",
2900 .owner = THIS_MODULE,
/* entry point */
static int __init bcm_enet_init(void)
{
	int ret;

	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&bcm63xx_enet_driver);
	if (ret) {
		platform_driver_unregister(&bcm63xx_enet_shared_driver);
		return ret;
	}

	ret = platform_driver_register(&bcm63xx_enetsw_driver);
	if (ret) {
		platform_driver_unregister(&bcm63xx_enet_driver);
		platform_driver_unregister(&bcm63xx_enet_shared_driver);
	}

	return ret;
}
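/* registration order matters here: the shared driver must bind first,
 * since both mac probes bail out with -ENODEV while
 * bcm_enet_shared_base[0] is still NULL; on any failure the drivers
 * already registered are unregistered in reverse order.
 */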
static void __exit bcm_enet_exit(void)
{
	platform_driver_unregister(&bcm63xx_enet_driver);
	platform_driver_unregister(&bcm63xx_enetsw_driver);
	platform_driver_unregister(&bcm63xx_enet_shared_driver);
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");