/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = __raw_readl(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	__raw_writel(val, priv->base + offset + off);			\
}

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
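
/* For reference, BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) above
 * expands to the umac_readl(priv, off)/umac_writel(priv, val, off) pair
 * operating on priv->base + SYS_PORT_UMAC_OFFSET + off; every other
 * instantiation follows the same pattern for its register block.
 */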

/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
	priv->irq##which##_mask &= ~(mask);				\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bit explicit here to save
 * one register write per-packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	unsigned long desc_flags;

	/* Ports are latched, so write upper address first */
	spin_lock_irqsave(&priv->desc_lock, desc_flags);
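	/* The HI half is staged by the hardware and the subsequent LO write
	 * commits the full descriptor to the ring (inferred from the latching
	 * behavior noted above, not confirmed against the datasheet).
	 */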
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
}

/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcm_sysport_get_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g. using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_sysport_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += STAT_SIZE;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(unsigned long *)p;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125 MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us; our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
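	/* Worked example: ec->tx_coalesce_usecs = 100 programs
	 * DIV_ROUND_UP(100 * 1000, 8192) = 13 timeout ticks, i.e. roughly
	 * 106.5 us of hardware coalescing delay.
	 */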

	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
		reg &= ~(RING_INTR_THRESH_MASK |
			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
		reg |= ec->tx_max_coalesced_frames;
		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			RING_TIMEOUT_SHIFT;
		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
	}

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= ec->rx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
		RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;
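	/* Example of the wrap-around branch (assuming a 16-bit index mask):
	 * rx_c_index = 65530 and p_index = 4 yields
	 * to_process = 65536 - 65530 + 4 = 10 pending packets.
	 */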

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
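	/* The subtraction is done modulo the mask, so a hardware index that
	 * wrapped past zero still yields the right count; e.g. with a 16-bit
	 * mask, c_index = 2 and ring->c_index = 65534 gives txbds_ready = 4.
	 */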

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
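	/* Only sources that are both raised and currently unmasked are
	 * considered; acknowledge exactly that set below.
	 */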
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* The ISRs cast dev_id back to a net_device, so pass dev here */
	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, dev);
	enable_irq(priv->irq0);

	disable_irq(priv->irq1);
	bcm_sysport_tx_isr(priv->irq1, dev);
	enable_irq(priv->irq1);
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NULL;
		}
		dev_kfree_skb(skb);
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);
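		/* csum_info thus carries two fields: the low bits point at
		 * where the computed checksum must be stored, and the shifted
		 * field points at the start of the L4 header, both expressed
		 * relative to the end of the TSB.
		 */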

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS) otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
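	/* addr_status_len packs the upper DMA address bits, the buffer length
	 * and the status flags into one 32-bit word, so a single HI/LO
	 * register pair fully describes the buffer to the TDMA engine.
	 */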

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}

	phy_print_status(priv->phydev);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	int ret;
	u32 reg;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
				GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}
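	/* Note that the RX descriptors live in on-chip RDMA memory rather
	 * than in DRAM: priv->rx_bds points into register space, so each
	 * cb->bd_addr is an I/O cursor written through dma_desc_set_addr().
	 */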

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}
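
/* Pack the 6-byte station address into the two UniMAC registers; e.g.
 * 00:11:22:33:44:55 is written as MAC0 = 0x00112233 and MAC1 = 0x00004455.
 */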
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				      0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	spin_lock_init(&priv->desc_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(priv->phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(priv->phydev);

	return 0;
}

static struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_settings		= bcm_sysport_get_settings,
	.set_settings		= bcm_sysport_set_settings,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
};

#define REV_FMT	"v%2x.%02x"

static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if ((int)priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);
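	/* With the TSB accounted for in needed_headroom, the stack should
	 * normally hand us SKBs with enough headroom, so that
	 * bcm_sysport_insert_tsb() rarely takes the skb_realloc_headroom()
	 * slow path.
	 */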

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
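		/* The 6-byte password is split big-endian: bytes 0-1 land in
		 * the MS register, bytes 2-5 in the LS register.
		 */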
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable rxchk */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(priv->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");