/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

31 /* I/O accessors register helpers */
32 #define BCM_SYSPORT_IO_MACRO(name, offset) \
33 static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
35 u32 reg = readl_relaxed(priv->base + offset + off); \
38 static inline void name##_writel(struct bcm_sysport_priv *priv, \
41 writel_relaxed(val, priv->base + offset + off); \
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

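/* Illustrative example of the Lite remapping above: a register the
 * SYSTEMPORT map places at RDMA_STATUS + 0x04 is accessed at
 * RDMA_STATUS + 0x08 on SYSTEMPORT Lite, while registers strictly below
 * RDMA_STATUS keep identical offsets on both variants.
 */
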
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

/* L2-interrupt masking/unmasking helpers, doing automatic saving of the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bit descriptors explicit here to
 * save one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	unsigned long desc_flags;

	/* Ports are latched, so write upper address first */
	spin_lock_irqsave(&priv->desc_lock, desc_flags);
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
}

/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

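/* Illustrative count: NUM_SYSPORT_TXQ_STAT covers the two per-queue
 * counters emitted by bcm_sysport_get_strings() (txq%d_packets and
 * txq%d_bytes), so e.g. a 4-queue device reports 8 extra strings on top
 * of the (possibly hole-punched) global list.
 */
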
static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125 MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us; our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

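	/* Worked example of the tick math above (illustrative): one timeout
	 * tick is 1024 / 125 MHz = 8.192 us, so requesting 100 usecs
	 * programs DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks, i.e. roughly
	 * 106.5 us of actual coalescing delay.
	 */
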
	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
		reg &= ~(RING_INTR_THRESH_MASK |
			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
		reg |= ec->tx_max_coalesced_frames;
		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			 RING_TIMEOUT_SHIFT;
		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
	}

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= ec->rx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
		RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (rx_skb)
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT
	 * Lite groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

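	/* Illustrative wrap-around example, assuming the usual 16-bit index
	 * mask: p_index = 0x0002 and rx_c_index = 0xfffe yield
	 * (0x0002 - 0xfffe) & 0xffff = 4 packets to process, so the masked
	 * unsigned subtraction stays correct across the rollover.
	 */
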
	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(&ring->napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

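	/* As the shift above implies, SYSTEMPORT Lite packs both indexes into
	 * RDMA_CONS_INDEX: the consumer index is written to the upper 16 bits
	 * while the hardware-maintained producer index sits in the lower 16
	 * bits (which is what bcm_sysport_desc_rx() reads and masks).
	 */
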
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

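		/* Illustrative packing example: for an IPv4/TCP frame with a
		 * 14-byte Ethernet header and 20-byte IP header, checksumming
		 * starts at byte 34 and the TCP checksum field lives 16 bytes
		 * into the transport header, so csum_start = 34 and csum_info
		 * carries both the checksum pointer (34 + 16 = 50) and the L4
		 * offset (34 << L4_PTR_SHIFT).
		 */
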
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS), otherwise they will be discarded
	 * when they enter the switch port logic. When Broadcom tags are
	 * enabled, we need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done
	 * after the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

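	/* Worked example of the padding math above (illustrative): ETH_ZLEN
	 * is 60 bytes (minimum frame size minus FCS), so padding to
	 * ETH_ZLEN + ENET_BRCM_TAG_LEN = 64 bytes leaves 64 + 4 (FCS) = 68
	 * bytes on the wire, which still satisfies the 64-byte switch check
	 * after the 4-byte Broadcom tag is stripped.
	 */
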
	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

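/* Worst-case wait in the enable/disable helpers above (illustrative):
 * 1000 polls of usleep_range(1000, 2000) is roughly 1-2 seconds before
 * the helpers give up and return -ETIMEDOUT.
 */
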
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

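/* Packing example for the helper above: 00:0a:f7:12:34:56 is programmed
 * as mac0 = 0x000af712 (first four octets) and mac1 = 0x3456 (last two
 * octets).
 */
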
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	spin_lock_init(&priv->desc_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
};

#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings) {
		ret = -ENOMEM;
		goto err_free_netdev;
	}

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if ((int)priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->max_mtu = UMAC_MAX_MTU_SIZE;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	u64_stats_init(&priv->syncp);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_fixed_link;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;

	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -EBUSY;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

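/* SecureOn password layout used above (illustrative): the 6-byte sopass
 * is split big-endian into a 16-bit MS word (bytes 0-1) written to
 * UMAC_PSW_MS and a 32-bit LS word (bytes 2-5) written to UMAC_PSW_LS.
 */
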
static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK if it was enabled before suspend */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");