2 * Driver for BCM963xx builtin Ethernet mac
4 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/clk.h>
24 #include <linux/etherdevice.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/ethtool.h>
28 #include <linux/crc32.h>
29 #include <linux/err.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/platform_device.h>
32 #include <linux/if_vlan.h>
34 #include <bcm63xx_dev_enet.h>
35 #include "bcm63xx_enet.h"
37 static char bcm_enet_driver_name[] = "bcm63xx_enet";
38 static char bcm_enet_driver_version[] = "1.0";
40 static int copybreak __read_mostly = 128;
41 module_param(copybreak, int, 0);
42 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
44 /* io registers memory shared between all devices */
45 static void __iomem *bcm_enet_shared_base[3];
48 * io helpers to access mac registers
50 static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
52 return bcm_readl(priv->base + off);
55 static inline void enet_writel(struct bcm_enet_priv *priv,
58 bcm_writel(val, priv->base + off);
62 * io helpers to access switch registers
64 static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
66 return bcm_readl(priv->base + off);
69 static inline void enetsw_writel(struct bcm_enet_priv *priv,
72 bcm_writel(val, priv->base + off);
75 static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
77 return bcm_readw(priv->base + off);
80 static inline void enetsw_writew(struct bcm_enet_priv *priv,
83 bcm_writew(val, priv->base + off);
86 static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
88 return bcm_readb(priv->base + off);
91 static inline void enetsw_writeb(struct bcm_enet_priv *priv,
94 bcm_writeb(val, priv->base + off);
98 /* io helpers to access shared registers */
99 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
101 return bcm_readl(bcm_enet_shared_base[0] + off);
104 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
107 bcm_writel(val, bcm_enet_shared_base[0] + off);
110 static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
112 return bcm_readl(bcm_enet_shared_base[1] +
113 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
116 static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
117 u32 val, u32 off, int chan)
119 bcm_writel(val, bcm_enet_shared_base[1] +
120 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
123 static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
125 return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
128 static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
129 u32 val, u32 off, int chan)
131 bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
135 * write given data into mii register and wait for transfer to end
136 * with timeout (average measured transfer time is 25us)
138 static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
142 /* make sure mii interrupt status is cleared */
143 enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
145 enet_writel(priv, data, ENET_MIIDATA_REG);
148 /* busy wait on mii interrupt bit, with timeout */
151 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
154 } while (limit-- > 0);
156 return (limit < 0) ? 1 : 0;
160 * MII internal read callback
162 static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
167 tmp = regnum << ENET_MIIDATA_REG_SHIFT;
168 tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
169 tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
170 tmp |= ENET_MIIDATA_OP_READ_MASK;
172 if (do_mdio_op(priv, tmp))
175 val = enet_readl(priv, ENET_MIIDATA_REG);
181 * MII internal write callback
183 static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
184 int regnum, u16 value)
188 tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
189 tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
190 tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
191 tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
192 tmp |= ENET_MIIDATA_OP_WRITE_MASK;
194 (void)do_mdio_op(priv, tmp);
199 * MII read callback from phylib
201 static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
204 return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
208 * MII write callback from phylib
210 static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
211 int regnum, u16 value)
213 return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
217 * MII read callback from mii core
219 static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
222 return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
226 * MII write callback from mii core
228 static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
229 int regnum, int value)
231 bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
237 static int bcm_enet_refill_rx(struct net_device *dev)
239 struct bcm_enet_priv *priv;
241 priv = netdev_priv(dev);
243 while (priv->rx_desc_count < priv->rx_ring_size) {
244 struct bcm_enet_desc *desc;
250 desc_idx = priv->rx_dirty_desc;
251 desc = &priv->rx_desc_cpu[desc_idx];
253 if (!priv->rx_skb[desc_idx]) {
254 skb = netdev_alloc_skb(dev, priv->rx_skb_size);
257 priv->rx_skb[desc_idx] = skb;
258 p = dma_map_single(&priv->pdev->dev, skb->data,
264 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
265 len_stat |= DMADESC_OWNER_MASK;
266 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
267 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
268 priv->rx_dirty_desc = 0;
270 priv->rx_dirty_desc++;
273 desc->len_stat = len_stat;
275 priv->rx_desc_count++;
277 /* tell dma engine we allocated one buffer */
278 if (priv->dma_has_sram)
279 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
281 enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
284 /* If rx ring is still empty, set a timer to try allocating
285 * again at a later time. */
286 if (priv->rx_desc_count == 0 && netif_running(dev)) {
287 dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
288 priv->rx_timeout.expires = jiffies + HZ;
289 add_timer(&priv->rx_timeout);
296 * timer callback to defer refill rx queue in case we're OOM
298 static void bcm_enet_refill_rx_timer(struct timer_list *t)
300 struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
301 struct net_device *dev = priv->net_dev;
303 spin_lock(&priv->rx_lock);
304 bcm_enet_refill_rx(dev);
305 spin_unlock(&priv->rx_lock);
309 * extract packet from rx queue
311 static int bcm_enet_receive_queue(struct net_device *dev, int budget)
313 struct bcm_enet_priv *priv;
317 priv = netdev_priv(dev);
318 kdev = &priv->pdev->dev;
321 /* don't scan ring further than number of refilled
323 if (budget > priv->rx_desc_count)
324 budget = priv->rx_desc_count;
327 struct bcm_enet_desc *desc;
333 desc_idx = priv->rx_curr_desc;
334 desc = &priv->rx_desc_cpu[desc_idx];
336 /* make sure we actually read the descriptor status at
340 len_stat = desc->len_stat;
342 /* break if dma ownership belongs to hw */
343 if (len_stat & DMADESC_OWNER_MASK)
347 priv->rx_curr_desc++;
348 if (priv->rx_curr_desc == priv->rx_ring_size)
349 priv->rx_curr_desc = 0;
350 priv->rx_desc_count--;
352 /* if the packet does not have start of packet _and_
353 * end of packet flag set, then just recycle it */
354 if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
355 (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
356 dev->stats.rx_dropped++;
360 /* recycle packet if it's marked as bad */
361 if (!priv->enet_is_sw &&
362 unlikely(len_stat & DMADESC_ERR_MASK)) {
363 dev->stats.rx_errors++;
365 if (len_stat & DMADESC_OVSIZE_MASK)
366 dev->stats.rx_length_errors++;
367 if (len_stat & DMADESC_CRC_MASK)
368 dev->stats.rx_crc_errors++;
369 if (len_stat & DMADESC_UNDER_MASK)
370 dev->stats.rx_frame_errors++;
371 if (len_stat & DMADESC_OV_MASK)
372 dev->stats.rx_fifo_errors++;
377 skb = priv->rx_skb[desc_idx];
378 len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
379 /* don't include FCS */
382 if (len < copybreak) {
383 struct sk_buff *nskb;
385 nskb = napi_alloc_skb(&priv->napi, len);
387 /* forget packet, just rearm desc */
388 dev->stats.rx_dropped++;
392 dma_sync_single_for_cpu(kdev, desc->address,
393 len, DMA_FROM_DEVICE);
394 memcpy(nskb->data, skb->data, len);
395 dma_sync_single_for_device(kdev, desc->address,
396 len, DMA_FROM_DEVICE);
399 dma_unmap_single(&priv->pdev->dev, desc->address,
400 priv->rx_skb_size, DMA_FROM_DEVICE);
401 priv->rx_skb[desc_idx] = NULL;
405 skb->protocol = eth_type_trans(skb, dev);
406 dev->stats.rx_packets++;
407 dev->stats.rx_bytes += len;
408 netif_receive_skb(skb);
410 } while (--budget > 0);
412 if (processed || !priv->rx_desc_count) {
413 bcm_enet_refill_rx(dev);
416 enet_dmac_writel(priv, priv->dma_chan_en_mask,
417 ENETDMAC_CHANCFG, priv->rx_chan);
425 * try to or force reclaim of transmitted buffers
427 static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
429 struct bcm_enet_priv *priv;
432 priv = netdev_priv(dev);
435 while (priv->tx_desc_count < priv->tx_ring_size) {
436 struct bcm_enet_desc *desc;
439 /* We run in a bh and fight against start_xmit, which
440 * is called with bh disabled */
441 spin_lock(&priv->tx_lock);
443 desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
445 if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
446 spin_unlock(&priv->tx_lock);
450 /* ensure other field of the descriptor were not read
451 * before we checked ownership */
454 skb = priv->tx_skb[priv->tx_dirty_desc];
455 priv->tx_skb[priv->tx_dirty_desc] = NULL;
456 dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
459 priv->tx_dirty_desc++;
460 if (priv->tx_dirty_desc == priv->tx_ring_size)
461 priv->tx_dirty_desc = 0;
462 priv->tx_desc_count++;
464 spin_unlock(&priv->tx_lock);
466 if (desc->len_stat & DMADESC_UNDER_MASK)
467 dev->stats.tx_errors++;
473 if (netif_queue_stopped(dev) && released)
474 netif_wake_queue(dev);
480 * poll func, called by network core
482 static int bcm_enet_poll(struct napi_struct *napi, int budget)
484 struct bcm_enet_priv *priv;
485 struct net_device *dev;
488 priv = container_of(napi, struct bcm_enet_priv, napi);
492 enet_dmac_writel(priv, priv->dma_chan_int_mask,
493 ENETDMAC_IR, priv->rx_chan);
494 enet_dmac_writel(priv, priv->dma_chan_int_mask,
495 ENETDMAC_IR, priv->tx_chan);
497 /* reclaim sent skb */
498 bcm_enet_tx_reclaim(dev, 0);
500 spin_lock(&priv->rx_lock);
501 rx_work_done = bcm_enet_receive_queue(dev, budget);
502 spin_unlock(&priv->rx_lock);
504 if (rx_work_done >= budget) {
505 /* rx queue is not yet empty/clean */
509 /* no more packet in rx/tx queue, remove device from poll
511 napi_complete_done(napi, rx_work_done);
513 /* restore rx/tx interrupt */
514 enet_dmac_writel(priv, priv->dma_chan_int_mask,
515 ENETDMAC_IRMASK, priv->rx_chan);
516 enet_dmac_writel(priv, priv->dma_chan_int_mask,
517 ENETDMAC_IRMASK, priv->tx_chan);
523 * mac interrupt handler
525 static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
527 struct net_device *dev;
528 struct bcm_enet_priv *priv;
532 priv = netdev_priv(dev);
534 stat = enet_readl(priv, ENET_IR_REG);
535 if (!(stat & ENET_IR_MIB))
538 /* clear & mask interrupt */
539 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
540 enet_writel(priv, 0, ENET_IRMASK_REG);
542 /* read mib registers in workqueue */
543 schedule_work(&priv->mib_update_task);
549 * rx/tx dma interrupt handler
551 static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
553 struct net_device *dev;
554 struct bcm_enet_priv *priv;
557 priv = netdev_priv(dev);
559 /* mask rx/tx interrupts */
560 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
561 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
563 napi_schedule(&priv->napi);
569 * tx request callback
572 bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
574 struct bcm_enet_priv *priv;
575 struct bcm_enet_desc *desc;
579 priv = netdev_priv(dev);
581 /* lock against tx reclaim */
582 spin_lock(&priv->tx_lock);
584 /* make sure the tx hw queue is not full, should not happen
585 * since we stop queue before it's the case */
586 if (unlikely(!priv->tx_desc_count)) {
587 netif_stop_queue(dev);
588 dev_err(&priv->pdev->dev, "xmit called with no tx desc "
590 ret = NETDEV_TX_BUSY;
594 /* pad small packets sent on a switch device */
595 if (priv->enet_is_sw && skb->len < 64) {
596 int needed = 64 - skb->len;
599 if (unlikely(skb_tailroom(skb) < needed)) {
600 struct sk_buff *nskb;
602 nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
604 ret = NETDEV_TX_BUSY;
610 data = skb_put_zero(skb, needed);
613 /* point to the next available desc */
614 desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
615 priv->tx_skb[priv->tx_curr_desc] = skb;
617 /* fill descriptor */
618 desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
621 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
622 len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
626 priv->tx_curr_desc++;
627 if (priv->tx_curr_desc == priv->tx_ring_size) {
628 priv->tx_curr_desc = 0;
629 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
631 priv->tx_desc_count--;
633 /* dma might be already polling, make sure we update desc
634 * fields in correct order */
636 desc->len_stat = len_stat;
640 enet_dmac_writel(priv, priv->dma_chan_en_mask,
641 ENETDMAC_CHANCFG, priv->tx_chan);
643 /* stop queue if no more desc available */
644 if (!priv->tx_desc_count)
645 netif_stop_queue(dev);
647 dev->stats.tx_bytes += skb->len;
648 dev->stats.tx_packets++;
652 spin_unlock(&priv->tx_lock);
657 * Change the interface's mac address.
659 static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
661 struct bcm_enet_priv *priv;
662 struct sockaddr *addr = p;
665 priv = netdev_priv(dev);
666 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
668 /* use perfect match register 0 to store my mac address */
669 val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
670 (dev->dev_addr[4] << 8) | dev->dev_addr[5];
671 enet_writel(priv, val, ENET_PML_REG(0));
673 val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
674 val |= ENET_PMH_DATAVALID_MASK;
675 enet_writel(priv, val, ENET_PMH_REG(0));
681 * Change rx mode (promiscuous/allmulti) and update multicast list
683 static void bcm_enet_set_multicast_list(struct net_device *dev)
685 struct bcm_enet_priv *priv;
686 struct netdev_hw_addr *ha;
690 priv = netdev_priv(dev);
692 val = enet_readl(priv, ENET_RXCFG_REG);
694 if (dev->flags & IFF_PROMISC)
695 val |= ENET_RXCFG_PROMISC_MASK;
697 val &= ~ENET_RXCFG_PROMISC_MASK;
699 /* only 3 perfect match registers left, first one is used for
701 if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
702 val |= ENET_RXCFG_ALLMCAST_MASK;
704 val &= ~ENET_RXCFG_ALLMCAST_MASK;
706 /* no need to set perfect match registers if we catch all
708 if (val & ENET_RXCFG_ALLMCAST_MASK) {
709 enet_writel(priv, val, ENET_RXCFG_REG);
714 netdev_for_each_mc_addr(ha, dev) {
720 /* update perfect match registers */
722 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
723 (dmi_addr[4] << 8) | dmi_addr[5];
724 enet_writel(priv, tmp, ENET_PML_REG(i + 1));
726 tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
727 tmp |= ENET_PMH_DATAVALID_MASK;
728 enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
732 enet_writel(priv, 0, ENET_PML_REG(i + 1));
733 enet_writel(priv, 0, ENET_PMH_REG(i + 1));
736 enet_writel(priv, val, ENET_RXCFG_REG);
740 * set mac duplex parameters
742 static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
746 val = enet_readl(priv, ENET_TXCTL_REG);
748 val |= ENET_TXCTL_FD_MASK;
750 val &= ~ENET_TXCTL_FD_MASK;
751 enet_writel(priv, val, ENET_TXCTL_REG);
755 * set mac flow control parameters
757 static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
761 /* rx flow control (pause frame handling) */
762 val = enet_readl(priv, ENET_RXCFG_REG);
764 val |= ENET_RXCFG_ENFLOW_MASK;
766 val &= ~ENET_RXCFG_ENFLOW_MASK;
767 enet_writel(priv, val, ENET_RXCFG_REG);
769 if (!priv->dma_has_sram)
772 /* tx flow control (pause frame generation) */
773 val = enet_dma_readl(priv, ENETDMA_CFG_REG);
775 val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
777 val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
778 enet_dma_writel(priv, val, ENETDMA_CFG_REG);
782 * link changed callback (from phylib)
784 static void bcm_enet_adjust_phy_link(struct net_device *dev)
786 struct bcm_enet_priv *priv;
787 struct phy_device *phydev;
790 priv = netdev_priv(dev);
791 phydev = dev->phydev;
794 if (priv->old_link != phydev->link) {
796 priv->old_link = phydev->link;
799 /* reflect duplex change in mac configuration */
800 if (phydev->link && phydev->duplex != priv->old_duplex) {
801 bcm_enet_set_duplex(priv,
802 (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
804 priv->old_duplex = phydev->duplex;
807 /* enable flow control if remote advertise it (trust phylib to
808 * check that duplex is full */
809 if (phydev->link && phydev->pause != priv->old_pause) {
810 int rx_pause_en, tx_pause_en;
813 /* pause was advertised by lpa and us */
816 } else if (!priv->pause_auto) {
817 /* pause setting overridden by user */
818 rx_pause_en = priv->pause_rx;
819 tx_pause_en = priv->pause_tx;
825 bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
827 priv->old_pause = phydev->pause;
830 if (status_changed) {
831 pr_info("%s: link %s", dev->name, phydev->link ?
834 pr_cont(" - %d/%s - flow control %s", phydev->speed,
835 DUPLEX_FULL == phydev->duplex ? "full" : "half",
836 phydev->pause == 1 ? "rx&tx" : "off");
843 * link changed callback (if phylib is not used)
845 static void bcm_enet_adjust_link(struct net_device *dev)
847 struct bcm_enet_priv *priv;
849 priv = netdev_priv(dev);
850 bcm_enet_set_duplex(priv, priv->force_duplex_full);
851 bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
852 netif_carrier_on(dev);
854 pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
856 priv->force_speed_100 ? 100 : 10,
857 priv->force_duplex_full ? "full" : "half",
858 priv->pause_rx ? "rx" : "off",
859 priv->pause_tx ? "tx" : "off");
863 * open callback, allocate dma rings & buffers and start rx operation
865 static int bcm_enet_open(struct net_device *dev)
867 struct bcm_enet_priv *priv;
868 struct sockaddr addr;
870 struct phy_device *phydev;
873 char phy_id[MII_BUS_ID_SIZE + 3];
877 priv = netdev_priv(dev);
878 kdev = &priv->pdev->dev;
882 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
883 priv->mii_bus->id, priv->phy_id);
885 phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
886 PHY_INTERFACE_MODE_MII);
888 if (IS_ERR(phydev)) {
889 dev_err(kdev, "could not attach to PHY\n");
890 return PTR_ERR(phydev);
893 /* mask with MAC supported features */
894 phydev->supported &= (SUPPORTED_10baseT_Half |
895 SUPPORTED_10baseT_Full |
896 SUPPORTED_100baseT_Half |
897 SUPPORTED_100baseT_Full |
901 phydev->advertising = phydev->supported;
903 if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
904 phydev->advertising |= SUPPORTED_Pause;
906 phydev->advertising &= ~SUPPORTED_Pause;
908 phy_attached_info(phydev);
911 priv->old_duplex = -1;
912 priv->old_pause = -1;
917 /* mask all interrupts and request them */
918 enet_writel(priv, 0, ENET_IRMASK_REG);
919 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
920 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
922 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
924 goto out_phy_disconnect;
926 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
931 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
936 /* initialize perfect match registers */
937 for (i = 0; i < 4; i++) {
938 enet_writel(priv, 0, ENET_PML_REG(i));
939 enet_writel(priv, 0, ENET_PMH_REG(i));
942 /* write device mac address */
943 memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
944 bcm_enet_set_mac_address(dev, &addr);
946 /* allocate rx dma ring */
947 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
948 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
954 priv->rx_desc_alloc_size = size;
955 priv->rx_desc_cpu = p;
957 /* allocate tx dma ring */
958 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
959 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
962 goto out_free_rx_ring;
965 priv->tx_desc_alloc_size = size;
966 priv->tx_desc_cpu = p;
968 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
972 goto out_free_tx_ring;
975 priv->tx_desc_count = priv->tx_ring_size;
976 priv->tx_dirty_desc = 0;
977 priv->tx_curr_desc = 0;
978 spin_lock_init(&priv->tx_lock);
980 /* init & fill rx ring with skbs */
981 priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
985 goto out_free_tx_skb;
988 priv->rx_desc_count = 0;
989 priv->rx_dirty_desc = 0;
990 priv->rx_curr_desc = 0;
992 /* initialize flow control buffer allocation */
993 if (priv->dma_has_sram)
994 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
995 ENETDMA_BUFALLOC_REG(priv->rx_chan));
997 enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
998 ENETDMAC_BUFALLOC, priv->rx_chan);
1000 if (bcm_enet_refill_rx(dev)) {
1001 dev_err(kdev, "cannot allocate rx skb queue\n");
1006 /* write rx & tx ring addresses */
1007 if (priv->dma_has_sram) {
1008 enet_dmas_writel(priv, priv->rx_desc_dma,
1009 ENETDMAS_RSTART_REG, priv->rx_chan);
1010 enet_dmas_writel(priv, priv->tx_desc_dma,
1011 ENETDMAS_RSTART_REG, priv->tx_chan);
1013 enet_dmac_writel(priv, priv->rx_desc_dma,
1014 ENETDMAC_RSTART, priv->rx_chan);
1015 enet_dmac_writel(priv, priv->tx_desc_dma,
1016 ENETDMAC_RSTART, priv->tx_chan);
1019 /* clear remaining state ram for rx & tx channel */
1020 if (priv->dma_has_sram) {
1021 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
1022 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
1023 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
1024 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
1025 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1026 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1028 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1029 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1032 /* set max rx/tx length */
1033 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
1034 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
1036 /* set dma maximum burst len */
1037 enet_dmac_writel(priv, priv->dma_maxburst,
1038 ENETDMAC_MAXBURST, priv->rx_chan);
1039 enet_dmac_writel(priv, priv->dma_maxburst,
1040 ENETDMAC_MAXBURST, priv->tx_chan);
1042 /* set correct transmit fifo watermark */
1043 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1045 /* set flow control low/high threshold to 1/3 / 2/3 */
1046 if (priv->dma_has_sram) {
1047 val = priv->rx_ring_size / 3;
1048 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1049 val = (priv->rx_ring_size * 2) / 3;
1050 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1052 enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1053 enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1054 enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1057 /* all set, enable mac and interrupts, start dma engine and
1058 * kick rx dma channel */
1060 val = enet_readl(priv, ENET_CTL_REG);
1061 val |= ENET_CTL_ENABLE_MASK;
1062 enet_writel(priv, val, ENET_CTL_REG);
1063 if (priv->dma_has_sram)
1064 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1065 enet_dmac_writel(priv, priv->dma_chan_en_mask,
1066 ENETDMAC_CHANCFG, priv->rx_chan);
1068 /* watch "mib counters about to overflow" interrupt */
1069 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1070 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1072 /* watch "packet transferred" interrupt in rx and tx */
1073 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1074 ENETDMAC_IR, priv->rx_chan);
1075 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1076 ENETDMAC_IR, priv->tx_chan);
1078 /* make sure we enable napi before rx interrupt */
1079 napi_enable(&priv->napi);
1081 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1082 ENETDMAC_IRMASK, priv->rx_chan);
1083 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1084 ENETDMAC_IRMASK, priv->tx_chan);
1089 bcm_enet_adjust_link(dev);
1091 netif_start_queue(dev);
1095 for (i = 0; i < priv->rx_ring_size; i++) {
1096 struct bcm_enet_desc *desc;
1098 if (!priv->rx_skb[i])
1101 desc = &priv->rx_desc_cpu[i];
1102 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1104 kfree_skb(priv->rx_skb[i]);
1106 kfree(priv->rx_skb);
1109 kfree(priv->tx_skb);
1112 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1113 priv->tx_desc_cpu, priv->tx_desc_dma);
1116 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1117 priv->rx_desc_cpu, priv->rx_desc_dma);
1120 free_irq(priv->irq_tx, dev);
1123 free_irq(priv->irq_rx, dev);
1126 free_irq(dev->irq, dev);
1130 phy_disconnect(phydev);
1138 static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1143 val = enet_readl(priv, ENET_CTL_REG);
1144 val |= ENET_CTL_DISABLE_MASK;
1145 enet_writel(priv, val, ENET_CTL_REG);
1151 val = enet_readl(priv, ENET_CTL_REG);
1152 if (!(val & ENET_CTL_DISABLE_MASK))
1159 * disable dma in given channel
1161 static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1165 enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1171 val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1172 if (!(val & ENETDMAC_CHANCFG_EN_MASK))
1181 static int bcm_enet_stop(struct net_device *dev)
1183 struct bcm_enet_priv *priv;
1184 struct device *kdev;
1187 priv = netdev_priv(dev);
1188 kdev = &priv->pdev->dev;
1190 netif_stop_queue(dev);
1191 napi_disable(&priv->napi);
1193 phy_stop(dev->phydev);
1194 del_timer_sync(&priv->rx_timeout);
1196 /* mask all interrupts */
1197 enet_writel(priv, 0, ENET_IRMASK_REG);
1198 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1199 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1201 /* make sure no mib update is scheduled */
1202 cancel_work_sync(&priv->mib_update_task);
1204 /* disable dma & mac */
1205 bcm_enet_disable_dma(priv, priv->tx_chan);
1206 bcm_enet_disable_dma(priv, priv->rx_chan);
1207 bcm_enet_disable_mac(priv);
1209 /* force reclaim of all tx buffers */
1210 bcm_enet_tx_reclaim(dev, 1);
1212 /* free the rx skb ring */
1213 for (i = 0; i < priv->rx_ring_size; i++) {
1214 struct bcm_enet_desc *desc;
1216 if (!priv->rx_skb[i])
1219 desc = &priv->rx_desc_cpu[i];
1220 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1222 kfree_skb(priv->rx_skb[i]);
1225 /* free remaining allocated memory */
1226 kfree(priv->rx_skb);
1227 kfree(priv->tx_skb);
1228 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1229 priv->rx_desc_cpu, priv->rx_desc_dma);
1230 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1231 priv->tx_desc_cpu, priv->tx_desc_dma);
1232 free_irq(priv->irq_tx, dev);
1233 free_irq(priv->irq_rx, dev);
1234 free_irq(dev->irq, dev);
1238 phy_disconnect(dev->phydev);
1246 struct bcm_enet_stats {
1247 char stat_string[ETH_GSTRING_LEN];
1253 #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
1254 offsetof(struct bcm_enet_priv, m)
1255 #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
1256 offsetof(struct net_device_stats, m)
1258 static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1259 { "rx_packets", DEV_STAT(rx_packets), -1 },
1260 { "tx_packets", DEV_STAT(tx_packets), -1 },
1261 { "rx_bytes", DEV_STAT(rx_bytes), -1 },
1262 { "tx_bytes", DEV_STAT(tx_bytes), -1 },
1263 { "rx_errors", DEV_STAT(rx_errors), -1 },
1264 { "tx_errors", DEV_STAT(tx_errors), -1 },
1265 { "rx_dropped", DEV_STAT(rx_dropped), -1 },
1266 { "tx_dropped", DEV_STAT(tx_dropped), -1 },
1268 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1269 { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1270 { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1271 { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1272 { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1273 { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1274 { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1275 { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1276 { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1277 { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1278 { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1279 { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1280 { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1281 { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1282 { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1283 { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1284 { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1285 { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1286 { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1287 { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1288 { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1290 { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1291 { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1292 { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1293 { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1294 { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1295 { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1296 { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1297 { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1298 { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1299 { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1300 { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1301 { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1302 { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1303 { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1304 { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1305 { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1306 { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1307 { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1308 { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1309 { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1310 { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1311 { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1315 #define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
1317 static const u32 unused_mib_regs[] = {
1318 ETH_MIB_TX_ALL_OCTETS,
1319 ETH_MIB_TX_ALL_PKTS,
1320 ETH_MIB_RX_ALL_OCTETS,
1321 ETH_MIB_RX_ALL_PKTS,
1325 static void bcm_enet_get_drvinfo(struct net_device *netdev,
1326 struct ethtool_drvinfo *drvinfo)
1328 strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
1329 strlcpy(drvinfo->version, bcm_enet_driver_version,
1330 sizeof(drvinfo->version));
1331 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1332 strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
1335 static int bcm_enet_get_sset_count(struct net_device *netdev,
1338 switch (string_set) {
1340 return BCM_ENET_STATS_LEN;
1346 static void bcm_enet_get_strings(struct net_device *netdev,
1347 u32 stringset, u8 *data)
1351 switch (stringset) {
1353 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1354 memcpy(data + i * ETH_GSTRING_LEN,
1355 bcm_enet_gstrings_stats[i].stat_string,
1362 static void update_mib_counters(struct bcm_enet_priv *priv)
1366 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1367 const struct bcm_enet_stats *s;
1371 s = &bcm_enet_gstrings_stats[i];
1372 if (s->mib_reg == -1)
1375 val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1376 p = (char *)priv + s->stat_offset;
1378 if (s->sizeof_stat == sizeof(u64))
1384 /* also empty unused mib counters to make sure mib counter
1385 * overflow interrupt is cleared */
1386 for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1387 (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1390 static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1392 struct bcm_enet_priv *priv;
1394 priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1395 mutex_lock(&priv->mib_update_lock);
1396 update_mib_counters(priv);
1397 mutex_unlock(&priv->mib_update_lock);
1399 /* reenable mib interrupt */
1400 if (netif_running(priv->net_dev))
1401 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1404 static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1405 struct ethtool_stats *stats,
1408 struct bcm_enet_priv *priv;
1411 priv = netdev_priv(netdev);
1413 mutex_lock(&priv->mib_update_lock);
1414 update_mib_counters(priv);
1416 for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1417 const struct bcm_enet_stats *s;
1420 s = &bcm_enet_gstrings_stats[i];
1421 if (s->mib_reg == -1)
1422 p = (char *)&netdev->stats;
1425 p += s->stat_offset;
1426 data[i] = (s->sizeof_stat == sizeof(u64)) ?
1427 *(u64 *)p : *(u32 *)p;
1429 mutex_unlock(&priv->mib_update_lock);
1432 static int bcm_enet_nway_reset(struct net_device *dev)
1434 struct bcm_enet_priv *priv;
1436 priv = netdev_priv(dev);
1438 return phy_ethtool_nway_reset(dev);
1443 static int bcm_enet_get_link_ksettings(struct net_device *dev,
1444 struct ethtool_link_ksettings *cmd)
1446 struct bcm_enet_priv *priv;
1447 u32 supported, advertising;
1449 priv = netdev_priv(dev);
1451 if (priv->has_phy) {
1455 phy_ethtool_ksettings_get(dev->phydev, cmd);
1459 cmd->base.autoneg = 0;
1460 cmd->base.speed = (priv->force_speed_100) ?
1461 SPEED_100 : SPEED_10;
1462 cmd->base.duplex = (priv->force_duplex_full) ?
1463 DUPLEX_FULL : DUPLEX_HALF;
1464 supported = ADVERTISED_10baseT_Half |
1465 ADVERTISED_10baseT_Full |
1466 ADVERTISED_100baseT_Half |
1467 ADVERTISED_100baseT_Full;
1469 ethtool_convert_legacy_u32_to_link_mode(
1470 cmd->link_modes.supported, supported);
1471 ethtool_convert_legacy_u32_to_link_mode(
1472 cmd->link_modes.advertising, advertising);
1473 cmd->base.port = PORT_MII;
1478 static int bcm_enet_set_link_ksettings(struct net_device *dev,
1479 const struct ethtool_link_ksettings *cmd)
1481 struct bcm_enet_priv *priv;
1483 priv = netdev_priv(dev);
1484 if (priv->has_phy) {
1487 return phy_ethtool_ksettings_set(dev->phydev, cmd);
1490 if (cmd->base.autoneg ||
1491 (cmd->base.speed != SPEED_100 &&
1492 cmd->base.speed != SPEED_10) ||
1493 cmd->base.port != PORT_MII)
1496 priv->force_speed_100 =
1497 (cmd->base.speed == SPEED_100) ? 1 : 0;
1498 priv->force_duplex_full =
1499 (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
1501 if (netif_running(dev))
1502 bcm_enet_adjust_link(dev);
1507 static void bcm_enet_get_ringparam(struct net_device *dev,
1508 struct ethtool_ringparam *ering)
1510 struct bcm_enet_priv *priv;
1512 priv = netdev_priv(dev);
1514 /* rx/tx ring is actually only limited by memory */
1515 ering->rx_max_pending = 8192;
1516 ering->tx_max_pending = 8192;
1517 ering->rx_pending = priv->rx_ring_size;
1518 ering->tx_pending = priv->tx_ring_size;
1521 static int bcm_enet_set_ringparam(struct net_device *dev,
1522 struct ethtool_ringparam *ering)
1524 struct bcm_enet_priv *priv;
1527 priv = netdev_priv(dev);
1530 if (netif_running(dev)) {
1535 priv->rx_ring_size = ering->rx_pending;
1536 priv->tx_ring_size = ering->tx_pending;
1541 err = bcm_enet_open(dev);
1545 bcm_enet_set_multicast_list(dev);
1550 static void bcm_enet_get_pauseparam(struct net_device *dev,
1551 struct ethtool_pauseparam *ecmd)
1553 struct bcm_enet_priv *priv;
1555 priv = netdev_priv(dev);
1556 ecmd->autoneg = priv->pause_auto;
1557 ecmd->rx_pause = priv->pause_rx;
1558 ecmd->tx_pause = priv->pause_tx;
1561 static int bcm_enet_set_pauseparam(struct net_device *dev,
1562 struct ethtool_pauseparam *ecmd)
1564 struct bcm_enet_priv *priv;
1566 priv = netdev_priv(dev);
1568 if (priv->has_phy) {
1569 if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1570 /* asymetric pause mode not supported,
1571 * actually possible but integrated PHY has RO
1576 /* no pause autoneg on direct mii connection */
1581 priv->pause_auto = ecmd->autoneg;
1582 priv->pause_rx = ecmd->rx_pause;
1583 priv->pause_tx = ecmd->tx_pause;
1588 static const struct ethtool_ops bcm_enet_ethtool_ops = {
1589 .get_strings = bcm_enet_get_strings,
1590 .get_sset_count = bcm_enet_get_sset_count,
1591 .get_ethtool_stats = bcm_enet_get_ethtool_stats,
1592 .nway_reset = bcm_enet_nway_reset,
1593 .get_drvinfo = bcm_enet_get_drvinfo,
1594 .get_link = ethtool_op_get_link,
1595 .get_ringparam = bcm_enet_get_ringparam,
1596 .set_ringparam = bcm_enet_set_ringparam,
1597 .get_pauseparam = bcm_enet_get_pauseparam,
1598 .set_pauseparam = bcm_enet_set_pauseparam,
1599 .get_link_ksettings = bcm_enet_get_link_ksettings,
1600 .set_link_ksettings = bcm_enet_set_link_ksettings,
1603 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1605 struct bcm_enet_priv *priv;
1607 priv = netdev_priv(dev);
1608 if (priv->has_phy) {
1611 return phy_mii_ioctl(dev->phydev, rq, cmd);
1613 struct mii_if_info mii;
1616 mii.mdio_read = bcm_enet_mdio_read_mii;
1617 mii.mdio_write = bcm_enet_mdio_write_mii;
1619 mii.phy_id_mask = 0x3f;
1620 mii.reg_num_mask = 0x1f;
1621 return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1626 * adjust mtu, can't be called while device is running
1628 static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1630 struct bcm_enet_priv *priv = netdev_priv(dev);
1631 int actual_mtu = new_mtu;
1633 if (netif_running(dev))
1636 /* add ethernet header + vlan tag size */
1637 actual_mtu += VLAN_ETH_HLEN;
1640 * setup maximum size before we get overflow mark in
1641 * descriptor, note that this will not prevent reception of
1642 * big frames, they will be split into multiple buffers
1645 priv->hw_mtu = actual_mtu;
1648 * align rx buffer size to dma burst len, account FCS since
1651 priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1652 priv->dma_maxburst * 4);
1659 * preinit hardware to allow mii operation while device is down
1661 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1666 /* make sure mac is disabled */
1667 bcm_enet_disable_mac(priv);
1669 /* soft reset mac */
1670 val = ENET_CTL_SRESET_MASK;
1671 enet_writel(priv, val, ENET_CTL_REG);
1676 val = enet_readl(priv, ENET_CTL_REG);
1677 if (!(val & ENET_CTL_SRESET_MASK))
1682 /* select correct mii interface */
1683 val = enet_readl(priv, ENET_CTL_REG);
1684 if (priv->use_external_mii)
1685 val |= ENET_CTL_EPHYSEL_MASK;
1687 val &= ~ENET_CTL_EPHYSEL_MASK;
1688 enet_writel(priv, val, ENET_CTL_REG);
1690 /* turn on mdc clock */
1691 enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1692 ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1694 /* set mib counters to self-clear when read */
1695 val = enet_readl(priv, ENET_MIBCTL_REG);
1696 val |= ENET_MIBCTL_RDCLEAR_MASK;
1697 enet_writel(priv, val, ENET_MIBCTL_REG);
1700 static const struct net_device_ops bcm_enet_ops = {
1701 .ndo_open = bcm_enet_open,
1702 .ndo_stop = bcm_enet_stop,
1703 .ndo_start_xmit = bcm_enet_start_xmit,
1704 .ndo_set_mac_address = bcm_enet_set_mac_address,
1705 .ndo_set_rx_mode = bcm_enet_set_multicast_list,
1706 .ndo_do_ioctl = bcm_enet_ioctl,
1707 .ndo_change_mtu = bcm_enet_change_mtu,
1711 * allocate netdevice, request register memory and register device.
1713 static int bcm_enet_probe(struct platform_device *pdev)
1715 struct bcm_enet_priv *priv;
1716 struct net_device *dev;
1717 struct bcm63xx_enet_platform_data *pd;
1718 struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1719 struct mii_bus *bus;
1722 if (!bcm_enet_shared_base[0])
1723 return -EPROBE_DEFER;
1725 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1726 res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1727 res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1728 if (!res_irq || !res_irq_rx || !res_irq_tx)
1732 dev = alloc_etherdev(sizeof(*priv));
1735 priv = netdev_priv(dev);
1737 priv->enet_is_sw = false;
1738 priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1740 ret = bcm_enet_change_mtu(dev, dev->mtu);
1744 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1745 priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
1746 if (IS_ERR(priv->base)) {
1747 ret = PTR_ERR(priv->base);
1751 dev->irq = priv->irq = res_irq->start;
1752 priv->irq_rx = res_irq_rx->start;
1753 priv->irq_tx = res_irq_tx->start;
1755 priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
1756 if (IS_ERR(priv->mac_clk)) {
1757 ret = PTR_ERR(priv->mac_clk);
1760 ret = clk_prepare_enable(priv->mac_clk);
1764 /* initialize default and fetch platform data */
1765 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1766 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1768 pd = dev_get_platdata(&pdev->dev);
1770 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1771 priv->has_phy = pd->has_phy;
1772 priv->phy_id = pd->phy_id;
1773 priv->has_phy_interrupt = pd->has_phy_interrupt;
1774 priv->phy_interrupt = pd->phy_interrupt;
1775 priv->use_external_mii = !pd->use_internal_phy;
1776 priv->pause_auto = pd->pause_auto;
1777 priv->pause_rx = pd->pause_rx;
1778 priv->pause_tx = pd->pause_tx;
1779 priv->force_duplex_full = pd->force_duplex_full;
1780 priv->force_speed_100 = pd->force_speed_100;
1781 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1782 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1783 priv->dma_chan_width = pd->dma_chan_width;
1784 priv->dma_has_sram = pd->dma_has_sram;
1785 priv->dma_desc_shift = pd->dma_desc_shift;
1786 priv->rx_chan = pd->rx_chan;
1787 priv->tx_chan = pd->tx_chan;
1790 if (priv->has_phy && !priv->use_external_mii) {
1791 /* using internal PHY, enable clock */
1792 priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
1793 if (IS_ERR(priv->phy_clk)) {
1794 ret = PTR_ERR(priv->phy_clk);
1795 priv->phy_clk = NULL;
1796 goto out_disable_clk_mac;
1798 ret = clk_prepare_enable(priv->phy_clk);
1800 goto out_disable_clk_mac;
1803 /* do minimal hardware init to be able to probe mii bus */
1804 bcm_enet_hw_preinit(priv);
1806 /* MII bus registration */
1807 if (priv->has_phy) {
1809 priv->mii_bus = mdiobus_alloc();
1810 if (!priv->mii_bus) {
1815 bus = priv->mii_bus;
1816 bus->name = "bcm63xx_enet MII bus";
1817 bus->parent = &pdev->dev;
1819 bus->read = bcm_enet_mdio_read_phylib;
1820 bus->write = bcm_enet_mdio_write_phylib;
1821 sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
1823 /* only probe bus where we think the PHY is, because
1824 * the mdio read operation return 0 instead of 0xffff
1825 * if a slave is not present on hw */
1826 bus->phy_mask = ~(1 << priv->phy_id);
1828 if (priv->has_phy_interrupt)
1829 bus->irq[priv->phy_id] = priv->phy_interrupt;
1831 ret = mdiobus_register(bus);
1833 dev_err(&pdev->dev, "unable to register mdio bus\n");
1838 /* run platform code to initialize PHY device */
1839 if (pd && pd->mii_config &&
1840 pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1841 bcm_enet_mdio_write_mii)) {
1842 dev_err(&pdev->dev, "unable to configure mdio bus\n");
1847 spin_lock_init(&priv->rx_lock);
1849 /* init rx timeout (used for oom) */
1850 timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
1852 /* init the mib update lock&work */
1853 mutex_init(&priv->mib_update_lock);
1854 INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1856 /* zero mib counters */
1857 for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1858 enet_writel(priv, 0, ENET_MIB_REG(i));
1860 /* register netdevice */
1861 dev->netdev_ops = &bcm_enet_ops;
1862 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1864 dev->ethtool_ops = &bcm_enet_ethtool_ops;
1865 /* MTU range: 46 - 2028 */
1866 dev->min_mtu = ETH_ZLEN - ETH_HLEN;
1867 dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
1868 SET_NETDEV_DEV(dev, &pdev->dev);
1870 ret = register_netdev(dev);
1872 goto out_unregister_mdio;
1874 netif_carrier_off(dev);
1875 platform_set_drvdata(pdev, dev);
1877 priv->net_dev = dev;
1881 out_unregister_mdio:
1883 mdiobus_unregister(priv->mii_bus);
1887 mdiobus_free(priv->mii_bus);
1890 /* turn off mdc clock */
1891 enet_writel(priv, 0, ENET_MIISC_REG);
1892 clk_disable_unprepare(priv->phy_clk);
1894 out_disable_clk_mac:
1895 clk_disable_unprepare(priv->mac_clk);
1903 * exit func, stops hardware and unregisters netdevice
1905 static int bcm_enet_remove(struct platform_device *pdev)
1907 struct bcm_enet_priv *priv;
1908 struct net_device *dev;
1910 /* stop netdevice */
1911 dev = platform_get_drvdata(pdev);
1912 priv = netdev_priv(dev);
1913 unregister_netdev(dev);
1915 /* turn off mdc clock */
1916 enet_writel(priv, 0, ENET_MIISC_REG);
1918 if (priv->has_phy) {
1919 mdiobus_unregister(priv->mii_bus);
1920 mdiobus_free(priv->mii_bus);
1922 struct bcm63xx_enet_platform_data *pd;
1924 pd = dev_get_platdata(&pdev->dev);
1925 if (pd && pd->mii_config)
1926 pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1927 bcm_enet_mdio_write_mii);
1930 /* disable hw block clocks */
1931 clk_disable_unprepare(priv->phy_clk);
1932 clk_disable_unprepare(priv->mac_clk);
1938 struct platform_driver bcm63xx_enet_driver = {
1939 .probe = bcm_enet_probe,
1940 .remove = bcm_enet_remove,
1942 .name = "bcm63xx_enet",
1943 .owner = THIS_MODULE,
1948 * switch mii access callbacks
1950 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1951 int ext, int phy_id, int location)
1956 spin_lock_bh(&priv->enetsw_mdio_lock);
1957 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1959 reg = ENETSW_MDIOC_RD_MASK |
1960 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1961 (location << ENETSW_MDIOC_REG_SHIFT);
1964 reg |= ENETSW_MDIOC_EXT_MASK;
1966 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1968 ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1969 spin_unlock_bh(&priv->enetsw_mdio_lock);
1973 static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1974 int ext, int phy_id, int location,
1979 spin_lock_bh(&priv->enetsw_mdio_lock);
1980 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1982 reg = ENETSW_MDIOC_WR_MASK |
1983 (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
1984 (location << ENETSW_MDIOC_REG_SHIFT);
1987 reg |= ENETSW_MDIOC_EXT_MASK;
1991 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1993 spin_unlock_bh(&priv->enetsw_mdio_lock);
1996 static inline int bcm_enet_port_is_rgmii(int portid)
1998 return portid >= ENETSW_RGMII_PORT0;
2002 * enet sw PHY polling
2004 static void swphy_poll_timer(struct timer_list *t)
2006 struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
2009 for (i = 0; i < priv->num_ports; i++) {
2010 struct bcm63xx_enetsw_port *port;
2011 int val, j, up, advertise, lpa, speed, duplex, media;
2012 int external_phy = bcm_enet_port_is_rgmii(i);
2015 port = &priv->used_ports[i];
2019 if (port->bypass_link)
2022 /* dummy read to clear */
2023 for (j = 0; j < 2; j++)
2024 val = bcmenet_sw_mdio_read(priv, external_phy,
2025 port->phy_id, MII_BMSR);
2030 up = (val & BMSR_LSTATUS) ? 1 : 0;
2031 if (!(up ^ priv->sw_port_link[i]))
2034 priv->sw_port_link[i] = up;
2038 dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2040 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2041 ENETSW_PORTOV_REG(i));
2042 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2043 ENETSW_PTCTRL_TXDIS_MASK,
2044 ENETSW_PTCTRL_REG(i));
2048 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2049 port->phy_id, MII_ADVERTISE);
2051 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2054 /* figure out media and duplex from advertise and LPA values */
2055 media = mii_nway_result(lpa & advertise);
2056 duplex = (media & ADVERTISE_FULL) ? 1 : 0;
2058 if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
2063 if (val & BMSR_ESTATEN) {
2064 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2065 port->phy_id, MII_CTRL1000);
2067 lpa = bcmenet_sw_mdio_read(priv, external_phy,
2068 port->phy_id, MII_STAT1000);
2070 if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
2071 && lpa & (LPA_1000FULL | LPA_1000HALF)) {
2073 duplex = (lpa & LPA_1000FULL);
2077 dev_info(&priv->pdev->dev,
2078 "link UP on %s, %dMbps, %s-duplex\n",
2079 port->name, speed, duplex ? "full" : "half");
2081 override = ENETSW_PORTOV_ENABLE_MASK |
2082 ENETSW_PORTOV_LINKUP_MASK;
2085 override |= ENETSW_IMPOV_1000_MASK;
2086 else if (speed == 100)
2087 override |= ENETSW_IMPOV_100_MASK;
2089 override |= ENETSW_IMPOV_FDX_MASK;
2091 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2092 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2095 priv->swphy_poll.expires = jiffies + HZ;
2096 add_timer(&priv->swphy_poll);
2100 * open callback, allocate dma rings & buffers and start rx operation
2102 static int bcm_enetsw_open(struct net_device *dev)
2104 struct bcm_enet_priv *priv;
2105 struct device *kdev;
2111 priv = netdev_priv(dev);
2112 kdev = &priv->pdev->dev;
2114 /* mask all interrupts and request them */
2115 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2116 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2118 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2123 if (priv->irq_tx != -1) {
2124 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2127 goto out_freeirq_rx;
2130 /* allocate rx dma ring */
2131 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2132 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2134 dev_err(kdev, "cannot allocate rx ring %u\n", size);
2136 goto out_freeirq_tx;
2139 priv->rx_desc_alloc_size = size;
2140 priv->rx_desc_cpu = p;
2142 /* allocate tx dma ring */
2143 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2144 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2146 dev_err(kdev, "cannot allocate tx ring\n");
2148 goto out_free_rx_ring;
2151 priv->tx_desc_alloc_size = size;
2152 priv->tx_desc_cpu = p;
2154 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
2156 if (!priv->tx_skb) {
2157 dev_err(kdev, "cannot allocate rx skb queue\n");
2159 goto out_free_tx_ring;
2162 priv->tx_desc_count = priv->tx_ring_size;
2163 priv->tx_dirty_desc = 0;
2164 priv->tx_curr_desc = 0;
2165 spin_lock_init(&priv->tx_lock);
2167 /* init & fill rx ring with skbs */
2168 priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
2170 if (!priv->rx_skb) {
2171 dev_err(kdev, "cannot allocate rx skb queue\n");
2173 goto out_free_tx_skb;
2176 priv->rx_desc_count = 0;
2177 priv->rx_dirty_desc = 0;
2178 priv->rx_curr_desc = 0;
2180 /* disable all ports */
2181 for (i = 0; i < priv->num_ports; i++) {
2182 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2183 ENETSW_PORTOV_REG(i));
2184 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2185 ENETSW_PTCTRL_TXDIS_MASK,
2186 ENETSW_PTCTRL_REG(i));
2188 priv->sw_port_link[i] = 0;
2192 val = enetsw_readb(priv, ENETSW_GMCR_REG);
2193 val |= ENETSW_GMCR_RST_MIB_MASK;
2194 enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2196 val &= ~ENETSW_GMCR_RST_MIB_MASK;
2197 enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2200 /* force CPU port state */
2201 val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2202 val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
2203 enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2205 /* enable switch forward engine */
2206 val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2207 val |= ENETSW_SWMODE_FWD_EN_MASK;
2208 enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2210 /* enable jumbo on all ports */
2211 enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2212 enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2214 /* initialize flow control buffer allocation */
2215 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2216 ENETDMA_BUFALLOC_REG(priv->rx_chan));
2218 if (bcm_enet_refill_rx(dev)) {
2219 dev_err(kdev, "cannot allocate rx skb queue\n");
2224 /* write rx & tx ring addresses */
2225 enet_dmas_writel(priv, priv->rx_desc_dma,
2226 ENETDMAS_RSTART_REG, priv->rx_chan);
2227 enet_dmas_writel(priv, priv->tx_desc_dma,
2228 ENETDMAS_RSTART_REG, priv->tx_chan);
2230 /* clear remaining state ram for rx & tx channel */
2231 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2232 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2233 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2234 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2235 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2236 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2238 /* set dma maximum burst len */
2239 enet_dmac_writel(priv, priv->dma_maxburst,
2240 ENETDMAC_MAXBURST, priv->rx_chan);
2241 enet_dmac_writel(priv, priv->dma_maxburst,
2242 ENETDMAC_MAXBURST, priv->tx_chan);
2244 /* set flow control low/high threshold to 1/3 / 2/3 */
2245 val = priv->rx_ring_size / 3;
2246 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2247 val = (priv->rx_ring_size * 2) / 3;
2248 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2250 /* all set, enable mac and interrupts, start dma engine and
2251 * kick rx dma channel
2254 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2255 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2256 ENETDMAC_CHANCFG, priv->rx_chan);
2258 /* watch "packet transferred" interrupt in rx and tx */
2259 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2260 ENETDMAC_IR, priv->rx_chan);
2261 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2262 ENETDMAC_IR, priv->tx_chan);
2264 /* make sure we enable napi before rx interrupt */
2265 napi_enable(&priv->napi);
2267 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2268 ENETDMAC_IRMASK, priv->rx_chan);
2269 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2270 ENETDMAC_IRMASK, priv->tx_chan);
2272 netif_carrier_on(dev);
2273 netif_start_queue(dev);
2275 /* apply override config for bypass_link ports here. */
2276 for (i = 0; i < priv->num_ports; i++) {
2277 struct bcm63xx_enetsw_port *port;
2279 port = &priv->used_ports[i];
2283 if (!port->bypass_link)
2286 override = ENETSW_PORTOV_ENABLE_MASK |
2287 ENETSW_PORTOV_LINKUP_MASK;
2289 switch (port->force_speed) {
2291 override |= ENETSW_IMPOV_1000_MASK;
2294 override |= ENETSW_IMPOV_100_MASK;
2299 pr_warn("invalid forced speed on port %s: assume 10\n",
2304 if (port->force_duplex_full)
2305 override |= ENETSW_IMPOV_FDX_MASK;
2308 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2309 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2312 /* start phy polling timer */
2313 timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
2314 mod_timer(&priv->swphy_poll, jiffies);
2318 for (i = 0; i < priv->rx_ring_size; i++) {
2319 struct bcm_enet_desc *desc;
2321 if (!priv->rx_skb[i])
2324 desc = &priv->rx_desc_cpu[i];
2325 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2327 kfree_skb(priv->rx_skb[i]);
2329 kfree(priv->rx_skb);
2332 kfree(priv->tx_skb);
2335 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2336 priv->tx_desc_cpu, priv->tx_desc_dma);
2339 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2340 priv->rx_desc_cpu, priv->rx_desc_dma);
2343 if (priv->irq_tx != -1)
2344 free_irq(priv->irq_tx, dev);
2347 free_irq(priv->irq_rx, dev);
2354 static int bcm_enetsw_stop(struct net_device *dev)
2356 struct bcm_enet_priv *priv;
2357 struct device *kdev;
2360 priv = netdev_priv(dev);
2361 kdev = &priv->pdev->dev;
2363 del_timer_sync(&priv->swphy_poll);
2364 netif_stop_queue(dev);
2365 napi_disable(&priv->napi);
2366 del_timer_sync(&priv->rx_timeout);
2368 /* mask all interrupts */
2369 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2370 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2372 /* disable dma & mac */
2373 bcm_enet_disable_dma(priv, priv->tx_chan);
2374 bcm_enet_disable_dma(priv, priv->rx_chan);
2376 /* force reclaim of all tx buffers */
2377 bcm_enet_tx_reclaim(dev, 1);
2379 /* free the rx skb ring */
2380 for (i = 0; i < priv->rx_ring_size; i++) {
2381 struct bcm_enet_desc *desc;
2383 if (!priv->rx_skb[i])
2386 desc = &priv->rx_desc_cpu[i];
2387 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2389 kfree_skb(priv->rx_skb[i]);
	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}
/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}
/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}
/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location, int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}
static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}
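/* generic_mii_ioctl() implements SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG on
 * top of the two mdio accessors above, so userspace tools such as
 * mii-tool can reach both internal and external phys through this one
 * entry point
 */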
static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_do_ioctl		= bcm_enetsw_ioctl,
};
static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped",	GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
};
#define BCM_ENETSW_STATS_LEN	\
	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enetsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}
static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, bcm_enet_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, bcm_enet_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}
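/* the two passes above: the first snapshots the hardware MIB counters
 * into the private mib copy (64-bit counters span two consecutive
 * 32-bit registers, high word at reg + 1), the second flattens both the
 * generic device stats and the mib values into data[] in string order
 */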
static void bcm_enetsw_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}
static int bcm_enetsw_set_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}
	return 0;
}
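/* note: a ring-size change only takes effect through the full stop/open
 * cycle above, since the descriptor rings and skb arrays are allocated
 * in bcm_enetsw_open(); if the re-open fails the device is closed
 */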
static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings		= bcm_enetsw_get_strings,
	.get_sset_count		= bcm_enetsw_get_sset_count,
	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
	.get_drvinfo		= bcm_enetsw_get_drvinfo,
	.get_ringparam		= bcm_enetsw_get_ringparam,
	.set_ringparam		= bcm_enetsw_set_ringparam,
};
/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;
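	/* the shared DMA register window is mapped by the separate
	 * bcm63xx_enet_shared driver; deferring here simply retries this
	 * probe once that driver has bound
	 */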
	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(*priv));
	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}
	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;
	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);

out:
	free_netdev(dev);
	return ret;
}
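/* note: priv->base and priv->mac_clk are devm-managed, so the error
 * path above only has to undo clk_prepare_enable() and free the netdev
 */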
/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}
struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
		.owner	= THIS_MODULE,
	},
};
/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		p[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}
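/* the mappings are staged in a local array and only published to
 * bcm_enet_shared_base once all three regions map successfully, so the
 * -EPROBE_DEFER check in the mac/switch probes never sees a partially
 * initialized window
 */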
static int bcm_enet_shared_remove(struct platform_device *pdev)
{
	return 0;
}
2811 /* this "shared" driver is needed because both macs share a single
2814 struct platform_driver bcm63xx_enet_shared_driver = {
2815 .probe = bcm_enet_shared_probe,
2816 .remove = bcm_enet_shared_remove,
2818 .name = "bcm63xx_enet_shared",
2819 .owner = THIS_MODULE,
static struct platform_driver * const drivers[] = {
	&bcm63xx_enet_shared_driver,
	&bcm63xx_enet_driver,
	&bcm63xx_enetsw_driver,
};
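/* the shared driver is listed first, but probe ordering is ultimately
 * enforced by the -EPROBE_DEFER check in the mac/switch probes rather
 * than by registration order
 */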
static int __init bcm_enet_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit bcm_enet_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);
MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");