/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
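/* For example, with TOTAL_DESC = 256 and 4 Tx queues of 32 BDs each, the
 * default queue 16 is left with 256 - 4 * 32 = 128 descriptors (see the Tx
 * queue layout described above bcmgenet_init_tx_queues()).
 */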
#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
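/* In other words, each DMA block starts with the descriptor array itself
 * (TOTAL_DESC * DMA_DESC_SIZE bytes) and the TDMA/RDMA control registers
 * follow immediately after it.
 */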
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take a couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus are just as expensive as the
	 * writes above, so skip the HI word unless the platform is
	 * explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};
static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};
/* GENET v4 supports 40-bit pointer addressing. The LO and HI words of
 * each pointer are kept contiguous, which shifts the offsets of the
 * remaining ring registers relative to earlier versions.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}
static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(priv->phydev, cmd);
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	return 0;
}
static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
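		/* e.g. rx_coalesce_usecs = 50 becomes
		 * DIV_ROUND_UP(50 * 1000, 8192) = 7 timeout ticks of
		 * ~8.192us each, i.e. an effective timeout of ~57us
		 */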
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}
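/* From user space these knobs are driven through ethtool's coalescing
 * interface, e.g. "ethtool -C eth0 rx-usecs 50 rx-frames 16"
 * (illustrative values).
 */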
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET	0xc
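/* Consequently, within the UMAC MIB block the RX stats start at
 * UMAC_MIB_START, the TX stats one gap (0xC) past the end of the RX stats,
 * and the RUNT stats one further gap past the end of the TX stats; the
 * switch fall-through in bcmgenet_update_mib_counters() accumulates these
 * gaps into the register offset.
 */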
700 /* Hardware counters must be kept in sync because the order/offset
701 * is important here (order in structure declaration = order in hardware)
703 static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
705 STAT_NETDEV(rx_packets),
706 STAT_NETDEV(tx_packets),
707 STAT_NETDEV(rx_bytes),
708 STAT_NETDEV(tx_bytes),
709 STAT_NETDEV(rx_errors),
710 STAT_NETDEV(tx_errors),
711 STAT_NETDEV(rx_dropped),
712 STAT_NETDEV(tx_dropped),
713 STAT_NETDEV(multicast),
714 /* UniMAC RSV counters */
715 STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
716 STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
717 STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
718 STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
719 STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
720 STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
721 STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
722 STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
723 STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
724 STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
725 STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
726 STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
727 STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
728 STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
729 STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
730 STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
731 STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
732 STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
733 STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
734 STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
735 STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
736 STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
737 STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
738 STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
739 STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
740 STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
741 STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
742 STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
743 STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
744 /* UniMAC TSV counters */
745 STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
746 STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
747 STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
748 STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
749 STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
750 STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
751 STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
752 STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
753 STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
754 STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
755 STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
756 STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
757 STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
758 STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
759 STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
760 STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
761 STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
762 STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
763 STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
764 STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
765 STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
766 STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
767 STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
768 STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
769 STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
770 STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
771 STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
772 STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
773 STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
774 /* UniMAC RUNT counters */
775 STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
776 STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
777 STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
778 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
779 /* Misc UniMAC counters */
780 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
781 UMAC_RBUF_OVFL_CNT_V1),
782 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
783 UMAC_RBUF_ERR_CNT_V1),
784 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
785 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
786 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
787 STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
790 #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}
static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
};
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS | EXT_ENERGY_DET_MASK);
		break;
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE) {
		bcmgenet_phy_power_set(priv->dev, true);
		bcmgenet_mii_reset(priv->dev);
	}
}
/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int bytes_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;
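	/* The consumer index is only 16 bits wide, so it can wrap between
	 * reads: e.g. ring->c_index == 0xfffe and c_index == 0x0001 yields
	 * txbds_ready = (0xffff + 1) - 0xfffe + 0x0001 = 3 completed BDs.
	 */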
	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
			dma_unmap_single(kdev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 dma_unmap_len(tx_cb_ptr, dma_len),
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dma_unmap_page(kdev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	dev->stats.tx_packets += pkts_compl;
	dev->stats.tx_bytes += bytes_compl;

	txq = netdev_get_tx_queue(dev, ring->queue);
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;
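	/* length_status now carries the buffer length in its upper 16 bits
	 * (above DMA_BUFLENGTH_SHIFT), with the QTAG field and the
	 * SOP/EOP/CRC/CSUM flag bits packed below it.
	 */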
	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int frag_size;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	frag_size = skb_frag_size(frag);

	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= (nr_frags + 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
			netdev_err(dev,
				   "%s: tx ring %d full when queue %d awake\n",
				   __func__, index, ring->queue);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
						DMA_RX_OV |
						DMA_RX_NO |
						DMA_RX_LG |
						DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2 bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it to the kernel */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index,
					  ring->c_index, RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}
/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ring->int_enable(ring);
	}

	return work_done;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assignment */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(kdev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts. */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplugged event for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
			int0_enable |= UMAC_IRQ_PHY_DET_R;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg;
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	int i;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Enable Rx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_RXDMA_DONE;

	/* Enable Tx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_TXDMA_DONE;

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	/* Enable Rx priority queue interrupts */
	for (i = 0; i < priv->hw_params->rx_queues; ++i)
		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));

	/* Enable Tx priority queue interrupts */
	for (i = 0; i < priv->hw_params->tx_queues; ++i)
		int1_enable |= (1 << i);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine. */
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}
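/* All ring addresses and pointers above are programmed in 32-bit word units:
 * e.g. with words_per_bd = 2, a ring covering BDs 0..31 gets DMA_START_ADDR 0
 * and DMA_END_ADDR 2 * 32 - 1 = 63 (illustrative values).
 */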
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
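	/* i.e. the XOFF (flow control assert) threshold is placed in the
	 * upper half of the word and the XON (flow control de-assert)
	 * threshold in the lower half
	 */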
	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
}

static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
}

static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};
	u32 dma_enable;

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
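	/* Each DMA_PRIORITY_n register packs the priority fields of several
	 * queues; DMA_PRIO_REG_INDEX() picks the register and
	 * DMA_PRIO_REG_SHIFT() the bit offset for a given queue index.
	 */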
2223 /* Set Tx queue priorities */
2224 bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2225 bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2226 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2228 /* Initialize Tx NAPI */
2229 bcmgenet_init_tx_napi(priv);
2231 /* Enable Tx queues */
2232 bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2237 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
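/* Worked example of the priority packing above (illustrative): with four Tx
 * queues and GENET_Q0_PRIORITY = 0, queue i is assigned priority value i and
 * default queue 16 is assigned priority 4, spread across the
 * DMA_PRIORITY_0..2 registers by DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT().
 * The strict-priority arbiter (DMA_ARBITER_SP) then always serves queue 0
 * ahead of the others.
 */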
static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
{
        unsigned int i;
        struct bcmgenet_rx_ring *ring;

        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                ring = &priv->rx_rings[i];
                netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
        }

        ring = &priv->rx_rings[DESC_INDEX];
        netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
}
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
        unsigned int i;
        struct bcmgenet_rx_ring *ring;

        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                ring = &priv->rx_rings[i];
                napi_enable(&ring->napi);
        }

        ring = &priv->rx_rings[DESC_INDEX];
        napi_enable(&ring->napi);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
        unsigned int i;
        struct bcmgenet_rx_ring *ring;

        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                ring = &priv->rx_rings[i];
                napi_disable(&ring->napi);
        }

        ring = &priv->rx_rings[DESC_INDEX];
        napi_disable(&ring->napi);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
{
        unsigned int i;
        struct bcmgenet_rx_ring *ring;

        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                ring = &priv->rx_rings[i];
                netif_napi_del(&ring->napi);
        }

        ring = &priv->rx_rings[DESC_INDEX];
        netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        u32 i;
        u32 dma_enable;
        u32 dma_ctrl;
        u32 ring_cfg;
        int ret;

        dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
        dma_enable = dma_ctrl & DMA_EN;
        dma_ctrl &= ~DMA_EN;
        bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

        dma_ctrl = 0;
        ring_cfg = 0;

        /* Initialize Rx priority queues */
        for (i = 0; i < priv->hw_params->rx_queues; i++) {
                ret = bcmgenet_init_rx_ring(priv, i,
                                            priv->hw_params->rx_bds_per_q,
                                            i * priv->hw_params->rx_bds_per_q,
                                            (i + 1) *
                                            priv->hw_params->rx_bds_per_q);
                if (ret)
                        return ret;

                ring_cfg |= (1 << i);
                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
        }

        /* Initialize Rx default queue 16 */
        ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
                                    priv->hw_params->rx_queues *
                                    priv->hw_params->rx_bds_per_q,
                                    TOTAL_DESC);
        if (ret)
                return ret;

        ring_cfg |= (1 << DESC_INDEX);
        dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

        /* Initialize Rx NAPI */
        bcmgenet_init_rx_napi(priv);

        /* Enable rings */
        bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

        /* Configure ring as descriptor ring and re-enable DMA if enabled */
        if (dma_enable)
                dma_ctrl |= DMA_EN;
        bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

        return 0;
}
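/* Sizing note (illustrative): GENET_Q16_RX_BD_CNT is whatever remains of the
 * TOTAL_DESC descriptor pool after the priority queues take their share;
 * e.g. with rx_queues = 0, as in the hw_params table below, all 256
 * descriptors go to the default queue 16.
 */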
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
        int ret = 0;
        int timeout = 0;
        u32 reg;
        u32 dma_ctrl;
        int i;

        /* Disable TDMA to stop adding more frames to the TX DMA */
        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
        reg &= ~DMA_EN;
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

        /* Check TDMA status register to confirm TDMA is disabled */
        while (timeout++ < DMA_TIMEOUT_VAL) {
                reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
                if (reg & DMA_DISABLED)
                        break;

                udelay(1);
        }

        if (timeout == DMA_TIMEOUT_VAL) {
                netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
                ret = -ETIMEDOUT;
        }

        /* Wait 10ms for packet drain in both tx and rx dma */
        usleep_range(10000, 20000);

        /* Disable RDMA */
        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
        reg &= ~DMA_EN;
        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

        timeout = 0;
        /* Check RDMA status register to confirm RDMA is disabled */
        while (timeout++ < DMA_TIMEOUT_VAL) {
                reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
                if (reg & DMA_DISABLED)
                        break;

                udelay(1);
        }

        if (timeout == DMA_TIMEOUT_VAL) {
                netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
                ret = -ETIMEDOUT;
        }

        dma_ctrl = 0;
        for (i = 0; i < priv->hw_params->rx_queues; i++)
                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
        reg &= ~dma_ctrl;
        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

        dma_ctrl = 0;
        for (i = 0; i < priv->hw_params->tx_queues; i++)
                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
        reg &= ~dma_ctrl;
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

        return ret;
}
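/* The teardown above follows the usual disable-and-poll pattern: clear
 * DMA_EN, then spin (up to DMA_TIMEOUT_VAL iterations, 1us apart) until the
 * hardware reports DMA_DISABLED in DMA_STATUS, before clearing the per-ring
 * enable bits. A timeout is reported but teardown still proceeds.
 */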
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
        int i;
        struct netdev_queue *txq;

        bcmgenet_fini_rx_napi(priv);
        bcmgenet_fini_tx_napi(priv);

        /* disable DMA */
        bcmgenet_dma_teardown(priv);

        for (i = 0; i < priv->num_tx_bds; i++) {
                if (priv->tx_cbs[i].skb != NULL) {
                        dev_kfree_skb(priv->tx_cbs[i].skb);
                        priv->tx_cbs[i].skb = NULL;
                }
        }

        for (i = 0; i < priv->hw_params->tx_queues; i++) {
                txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
                netdev_tx_reset_queue(txq);
        }

        txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
        netdev_tx_reset_queue(txq);

        bcmgenet_free_rx_buffers(priv);
        kfree(priv->rx_cbs);
        kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
        int ret;
        unsigned int i;
        struct enet_cb *cb;

        netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

        /* Initialize common Rx ring structures */
        priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
        priv->num_rx_bds = TOTAL_DESC;
        priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
                               GFP_KERNEL);
        if (!priv->rx_cbs)
                return -ENOMEM;

        for (i = 0; i < priv->num_rx_bds; i++) {
                cb = priv->rx_cbs + i;
                cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
        }

        /* Initialize common TX ring structures */
        priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
        priv->num_tx_bds = TOTAL_DESC;
        priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
                               GFP_KERNEL);
        if (!priv->tx_cbs) {
                kfree(priv->rx_cbs);
                return -ENOMEM;
        }

        for (i = 0; i < priv->num_tx_bds; i++) {
                cb = priv->tx_cbs + i;
                cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
        }

        /* Init rDma */
        bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

        /* Initialize Rx queues */
        ret = bcmgenet_init_rx_queues(priv->dev);
        if (ret) {
                netdev_err(priv->dev, "failed to initialize Rx queues\n");
                bcmgenet_free_rx_buffers(priv);
                kfree(priv->rx_cbs);
                kfree(priv->tx_cbs);
                return ret;
        }

        /* Init tDma */
        bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

        /* Initialize Tx queues */
        bcmgenet_init_tx_queues(priv->dev);

        return 0;
}
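/* Ordering matters in bcmgenet_init_dma(): the rx_cbs/tx_cbs control block
 * arrays must exist before the queue init calls above, because each ring
 * simply points into a slice of those arrays (see the tx_cbs partitioning
 * comment earlier). On failure, the Rx buffers and both arrays are unwound
 * here so the caller only has to propagate the error.
 */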
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
        unsigned long flags;
        unsigned int status;
        struct bcmgenet_priv *priv = container_of(
                        work, struct bcmgenet_priv, bcmgenet_irq_work);

        netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

        spin_lock_irqsave(&priv->lock, flags);
        status = priv->irq0_stat;
        priv->irq0_stat = 0;
        spin_unlock_irqrestore(&priv->lock, flags);

        if (status & UMAC_IRQ_MPD_R) {
                netif_dbg(priv, wol, priv->dev,
                          "magic packet detected, waking up\n");
                bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
        }

        if (status & UMAC_IRQ_PHY_DET_R &&
            priv->dev->phydev->autoneg != AUTONEG_ENABLE)
                phy_init_hw(priv->dev->phydev);

        /* Link UP/DOWN event */
        if (status & UMAC_IRQ_LINK_EVENT)
                phy_mac_interrupt(priv->phydev,
                                  !!(status & UMAC_IRQ_LINK_UP));
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
        struct bcmgenet_priv *priv = dev_id;
        struct bcmgenet_rx_ring *rx_ring;
        struct bcmgenet_tx_ring *tx_ring;
        unsigned int index, status;

        /* Read irq status */
        status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

        /* clear interrupts */
        bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);

        netif_dbg(priv, intr, priv->dev,
                  "%s: IRQ=0x%x\n", __func__, status);

        /* Check Rx priority queue interrupts */
        for (index = 0; index < priv->hw_params->rx_queues; index++) {
                if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
                        continue;

                rx_ring = &priv->rx_rings[index];

                if (likely(napi_schedule_prep(&rx_ring->napi))) {
                        rx_ring->int_disable(rx_ring);
                        __napi_schedule_irqoff(&rx_ring->napi);
                }
        }

        /* Check Tx priority queue interrupts */
        for (index = 0; index < priv->hw_params->tx_queues; index++) {
                if (!(status & BIT(index)))
                        continue;

                tx_ring = &priv->tx_rings[index];

                if (likely(napi_schedule_prep(&tx_ring->napi))) {
                        tx_ring->int_disable(tx_ring);
                        __napi_schedule_irqoff(&tx_ring->napi);
                }
        }

        return IRQ_HANDLED;
}
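/* Both ISRs AND out INTRL2_CPU_MASK_STATUS when reading INTRL2_CPU_STAT so
 * that causes which are latched but currently masked are ignored. The
 * napi_schedule_prep()/int_disable()/__napi_schedule_irqoff() sequence keeps
 * the per-ring interrupt off until the poll function re-enables it, the
 * standard NAPI interrupt-mitigation pattern.
 */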
/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
        struct bcmgenet_priv *priv = dev_id;
        struct bcmgenet_rx_ring *rx_ring;
        struct bcmgenet_tx_ring *tx_ring;
        unsigned int status;
        unsigned long flags;

        /* Read irq status */
        status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

        /* clear interrupts */
        bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);

        netif_dbg(priv, intr, priv->dev,
                  "IRQ=0x%x\n", status);

        if (status & UMAC_IRQ_RXDMA_DONE) {
                rx_ring = &priv->rx_rings[DESC_INDEX];

                if (likely(napi_schedule_prep(&rx_ring->napi))) {
                        rx_ring->int_disable(rx_ring);
                        __napi_schedule_irqoff(&rx_ring->napi);
                }
        }

        if (status & UMAC_IRQ_TXDMA_DONE) {
                tx_ring = &priv->tx_rings[DESC_INDEX];

                if (likely(napi_schedule_prep(&tx_ring->napi))) {
                        tx_ring->int_disable(tx_ring);
                        __napi_schedule_irqoff(&tx_ring->napi);
                }
        }

        if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
            status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
                wake_up(&priv->wq);
        }

        /* all other interested interrupts handled in bottom half */
        status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_MPD_R | UMAC_IRQ_PHY_DET_R);
        if (status) {
                /* Save irq status for bottom-half processing. */
                spin_lock_irqsave(&priv->lock, flags);
                priv->irq0_stat |= status;
                spin_unlock_irqrestore(&priv->lock, flags);

                schedule_work(&priv->bcmgenet_irq_work);
        }

        return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
        struct bcmgenet_priv *priv = dev_id;

        pm_wakeup_event(&priv->pdev->dev, 0);

        return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcmgenet_poll_controller(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);

        /* Invoke the main RX/TX interrupt handler */
        disable_irq(priv->irq0);
        bcmgenet_isr0(priv->irq0, priv);
        enable_irq(priv->irq0);

        /* And the interrupt handler for RX/TX priority queues */
        disable_irq(priv->irq1);
        bcmgenet_isr1(priv->irq1, priv);
        enable_irq(priv->irq1);
}
#endif
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
        u32 reg;

        reg = bcmgenet_rbuf_ctrl_get(priv);
        reg |= BIT(1);
        bcmgenet_rbuf_ctrl_set(priv, reg);
        udelay(10);

        reg &= ~BIT(1);
        bcmgenet_rbuf_ctrl_set(priv, reg);
        udelay(10);
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
                                 unsigned char *addr)
{
        bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | addr[3], UMAC_MAC0);
        bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
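/* Example of the packing above (illustrative): for the MAC address
 * 00:10:18:aa:bb:cc, UMAC_MAC0 is written with 0x001018aa and UMAC_MAC1
 * with 0x0000bbcc.
 */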
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
        u32 reg;
        u32 dma_ctrl;
        int i;

        /* disable DMA */
        dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
        for (i = 0; i < priv->hw_params->tx_queues; i++)
                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
        reg &= ~dma_ctrl;
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

        dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
        for (i = 0; i < priv->hw_params->rx_queues; i++)
                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
        reg &= ~dma_ctrl;
        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

        bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
        udelay(10);
        bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

        return dma_ctrl;
}
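/* The returned dma_ctrl is the set of enable bits that were just cleared, so
 * a caller can hand it straight back to bcmgenet_enable_dma() once the rings
 * have been reinitialized, as bcmgenet_open() and bcmgenet_resume() do.
 */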
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
        u32 reg;

        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
        reg |= dma_ctrl;
        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
        reg |= dma_ctrl;
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
        u32 i;

        bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
        bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
        bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

        for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
                bcmgenet_rdma_writel(priv, 0x0, i);

        for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
                bcmgenet_hfb_reg_writel(priv, 0x0,
                                        HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

        for (i = 0; i < priv->hw_params->hfb_filter_cnt *
                        priv->hw_params->hfb_filter_size; i++)
                bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
}
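/* Illustrative sizing: on GENET v3/v4 (see the hw_params table below) the
 * last loop clears 48 filters of 128 words each, i.e. 6144 words of filter
 * memory, in addition to the per-filter length and enable registers.
 */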
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
        if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
                return;

        bcmgenet_hfb_clear(priv);
}
static void bcmgenet_netif_start(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);

        /* Start the network engine */
        bcmgenet_enable_rx_napi(priv);
        bcmgenet_enable_tx_napi(priv);

        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

        netif_tx_start_all_queues(dev);

        /* Monitor link interrupts now */
        bcmgenet_link_intr_enable(priv);

        phy_start(priv->phydev);
}
static int bcmgenet_open(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        unsigned long dma_ctrl;
        u32 reg;
        int ret;

        netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

        /* Turn on the clock */
        clk_prepare_enable(priv->clk);

        /* If this is an internal GPHY, power it back on now, before UniMAC is
         * brought out of reset as absolutely no UniMAC activity is allowed
         */
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

        /* take MAC out of reset */
        bcmgenet_umac_reset(priv);

        ret = init_umac(priv);
        if (ret)
                goto err_clk_disable;

        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

        /* Make sure we reflect the value of CRC_CMD_FWD */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

        bcmgenet_set_hw_addr(priv, dev->dev_addr);

        /* Disable RX/TX DMA and flush TX queues */
        dma_ctrl = bcmgenet_dma_disable(priv);

        /* Reinitialize TDMA and RDMA and SW housekeeping */
        ret = bcmgenet_init_dma(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize DMA\n");
                goto err_clk_disable;
        }

        /* Always enable ring 16 - descriptor ring */
        bcmgenet_enable_dma(priv, dma_ctrl);

        /* HFB init */
        bcmgenet_hfb_init(priv);

        ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
                          dev->name, priv);
        if (ret < 0) {
                netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
                goto err_fini_dma;
        }

        ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
                          dev->name, priv);
        if (ret < 0) {
                netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
                goto err_irq0;
        }

        ret = bcmgenet_mii_probe(dev);
        if (ret) {
                netdev_err(dev, "failed to connect to PHY\n");
                goto err_irq1;
        }

        bcmgenet_netif_start(dev);

        return 0;

err_irq1:
        free_irq(priv->irq1, priv);
err_irq0:
        free_irq(priv->irq0, priv);
err_fini_dma:
        bcmgenet_fini_dma(priv);
err_clk_disable:
        if (priv->internal_phy)
                bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        clk_disable_unprepare(priv->clk);
        return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);

        netif_tx_stop_all_queues(dev);
        phy_stop(priv->phydev);
        bcmgenet_intr_disable(priv);
        bcmgenet_disable_rx_napi(priv);
        bcmgenet_disable_tx_napi(priv);

        /* Wait for pending work items to complete. Since interrupts are
         * disabled no new work will be scheduled.
         */
        cancel_work_sync(&priv->bcmgenet_irq_work);

        priv->old_link = -1;
        priv->old_speed = -1;
        priv->old_duplex = -1;
        priv->old_pause = -1;
}
static int bcmgenet_close(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int ret;

        netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

        bcmgenet_netif_stop(dev);

        /* Really kill the PHY state machine and disconnect from it */
        phy_disconnect(priv->phydev);

        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);

        ret = bcmgenet_dma_teardown(priv);
        if (ret)
                return ret;

        /* Disable MAC transmit. TX DMA must be disabled before this */
        umac_enable_set(priv, CMD_TX_EN, false);

        /* tx reclaim */
        bcmgenet_tx_reclaim_all(dev);
        bcmgenet_fini_dma(priv);

        free_irq(priv->irq0, priv);
        free_irq(priv->irq1, priv);

        if (priv->internal_phy)
                ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

        clk_disable_unprepare(priv->clk);

        return ret;
}
static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
{
        struct bcmgenet_priv *priv = ring->priv;
        u32 p_index, c_index, intsts, intmsk;
        struct netdev_queue *txq;
        unsigned int free_bds;
        unsigned long flags;
        bool txq_stopped;

        if (!netif_msg_tx_err(priv))
                return;

        txq = netdev_get_tx_queue(priv->dev, ring->queue);

        spin_lock_irqsave(&ring->lock, flags);
        if (ring->index == DESC_INDEX) {
                intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
                intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
        } else {
                intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
                intmsk = 1 << ring->index;
        }
        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
        p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
        txq_stopped = netif_tx_queue_stopped(txq);
        free_bds = ring->free_bds;
        spin_unlock_irqrestore(&ring->lock, flags);

        netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
                  "TX queue status: %s, interrupts: %s\n"
                  "(sw)free_bds: %d (sw)size: %d\n"
                  "(sw)p_index: %d (hw)p_index: %d\n"
                  "(sw)c_index: %d (hw)c_index: %d\n"
                  "(sw)clean_p: %d (sw)write_p: %d\n"
                  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
                  ring->index, ring->queue,
                  txq_stopped ? "stopped" : "active",
                  intsts & intmsk ? "enabled" : "disabled",
                  free_bds, ring->size,
                  ring->prod_index, p_index & DMA_P_INDEX_MASK,
                  ring->c_index, c_index & DMA_C_INDEX_MASK,
                  ring->clean_ptr, ring->write_ptr,
                  ring->cb_ptr, ring->end_ptr);
}
static void bcmgenet_timeout(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        u32 int0_enable = 0;
        u32 int1_enable = 0;
        unsigned int q;

        netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

        for (q = 0; q < priv->hw_params->tx_queues; q++)
                bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
        bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);

        bcmgenet_tx_reclaim_all(dev);

        for (q = 0; q < priv->hw_params->tx_queues; q++)
                int1_enable |= (1 << q);

        int0_enable = UMAC_IRQ_TXDMA_DONE;

        /* Re-enable TX interrupts if disabled */
        bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
        bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

        netif_trans_update(dev);

        dev->stats.tx_errors++;

        netif_tx_wake_all_queues(dev);
}
#define MAX_MDF_FILTER 17

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
                                         unsigned char *addr,
                                         int *i)
{
        bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
                             UMAC_MDF_ADDR + (*i * 4));
        bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
                             addr[4] << 8 | addr[5],
                             UMAC_MDF_ADDR + ((*i + 1) * 4));
        *i += 2;
}
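/* Each MDF entry consumes two UMAC_MDF_ADDR words; e.g. (illustrative) for
 * 00:10:18:aa:bb:cc the first word holds 0x00000010 (bytes 0-1) and the
 * second 0x18aabbcc (bytes 2-5), which is why *i advances by 2 per address.
 */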
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        int i, nfilter;
        u32 reg;

        netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

        /* Number of filters needed */
        nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;

        /*
         * Turn on promiscuous mode for three scenarios
         * 1. IFF_PROMISC flag is set
         * 2. IFF_ALLMULTI flag is set
         * 3. The number of filters needed exceeds the number of filters
         *    supported by the hardware.
         */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
            (nfilter > MAX_MDF_FILTER)) {
                reg |= CMD_PROMISC;
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
                bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
                return;
        } else {
                reg &= ~CMD_PROMISC;
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
        }

        /* update MDF filter */
        i = 0;
        /* Broadcast */
        bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
        /* my own address.*/
        bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);

        /* Unicast */
        netdev_for_each_uc_addr(ha, dev)
                bcmgenet_set_mdf_addr(priv, ha->addr, &i);

        /* Multicast */
        netdev_for_each_mc_addr(ha, dev)
                bcmgenet_set_mdf_addr(priv, ha->addr, &i);

        /* Enable filters */
        reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
        bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}
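/* Filter enable example (illustrative): with only the broadcast and device
 * addresses programmed, nfilter = 2 and the GENMASK() above yields bits
 * 16:15 of UMAC_MDF_CTRL set, enabling the first two of the 17 available
 * filters.
 */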
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;

        /* Setting the MAC address at the hardware level is not possible
         * without disabling the UniMAC RX/TX enable bits.
         */
        if (netif_running(dev))
                return -EBUSY;

        ether_addr_copy(dev->dev_addr, addr->sa_data);

        return 0;
}
static const struct net_device_ops bcmgenet_netdev_ops = {
        .ndo_open               = bcmgenet_open,
        .ndo_stop               = bcmgenet_close,
        .ndo_start_xmit         = bcmgenet_xmit,
        .ndo_tx_timeout         = bcmgenet_timeout,
        .ndo_set_rx_mode        = bcmgenet_set_rx_mode,
        .ndo_set_mac_address    = bcmgenet_set_mac_addr,
        .ndo_do_ioctl           = bcmgenet_ioctl,
        .ndo_set_features       = bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = bcmgenet_poll_controller,
#endif
};
/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
        [GENET_V1] = {
                .tx_queues = 0,
                .tx_bds_per_q = 0,
                .rx_queues = 0,
                .rx_bds_per_q = 0,
                .bp_in_en_shift = 16,
                .bp_in_mask = 0xffff,
                .hfb_filter_cnt = 16,
                .qtag_mask = 0x1F,
                .hfb_offset = 0x1000,
                .rdma_offset = 0x2000,
                .tdma_offset = 0x3000,
                .words_per_bd = 2,
        },
        [GENET_V2] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
                .rx_queues = 0,
                .rx_bds_per_q = 0,
                .bp_in_en_shift = 16,
                .bp_in_mask = 0xffff,
                .hfb_filter_cnt = 16,
                .qtag_mask = 0x1F,
                .tbuf_offset = 0x0600,
                .hfb_offset = 0x1000,
                .hfb_reg_offset = 0x2000,
                .rdma_offset = 0x3000,
                .tdma_offset = 0x4000,
                .words_per_bd = 2,
                .flags = GENET_HAS_EXT,
        },
        [GENET_V3] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
                .rx_queues = 0,
                .rx_bds_per_q = 0,
                .bp_in_en_shift = 17,
                .bp_in_mask = 0x1ffff,
                .hfb_filter_cnt = 48,
                .hfb_filter_size = 128,
                .qtag_mask = 0x3F,
                .tbuf_offset = 0x0600,
                .hfb_offset = 0x8000,
                .hfb_reg_offset = 0xfc00,
                .rdma_offset = 0x10000,
                .tdma_offset = 0x11000,
                .words_per_bd = 2,
                .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
                         GENET_HAS_MOCA_LINK_DET,
        },
        [GENET_V4] = {
                .tx_queues = 4,
                .tx_bds_per_q = 32,
                .rx_queues = 0,
                .rx_bds_per_q = 0,
                .bp_in_en_shift = 17,
                .bp_in_mask = 0x1ffff,
                .hfb_filter_cnt = 48,
                .hfb_filter_size = 128,
                .qtag_mask = 0x3F,
                .tbuf_offset = 0x0600,
                .hfb_offset = 0x8000,
                .hfb_reg_offset = 0xfc00,
                .rdma_offset = 0x2000,
                .tdma_offset = 0x4000,
                .words_per_bd = 3,
                .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
                         GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
        },
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
        struct bcmgenet_hw_params *params;
        u32 reg;
        u8 major;
        u16 gphy_rev;

        if (GENET_IS_V4(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
                genet_dma_ring_regs = genet_dma_ring_regs_v4;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
                priv->version = GENET_V4;
        } else if (GENET_IS_V3(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
                priv->version = GENET_V3;
        } else if (GENET_IS_V2(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
                priv->version = GENET_V2;
        } else if (GENET_IS_V1(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
                priv->version = GENET_V1;
        }

        /* enum genet_version starts at 1 */
        priv->hw_params = &bcmgenet_hw_params[priv->version];
        params = priv->hw_params;

        /* Read GENET HW version */
        reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
        major = (reg >> 24 & 0x0f);
        if (major == 5)
                major = 4;
        else if (major == 0)
                major = 1;
        if (major != priv->version) {
                dev_err(&priv->pdev->dev,
                        "GENET version mismatch, got: %d, configured for: %d\n",
                        major, priv->version);
        }

        /* Print the GENET core version */
        dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
                 major, (reg >> 16) & 0x0f, reg & 0xffff);

        /* Store the integrated PHY revision for the MDIO probing function
         * to pass this information to the PHY driver. The PHY driver expects
         * to find the PHY major revision in bits 15:8 while the GENET register
         * stores that information in bits 7:0, account for that.
         *
         * On newer chips, starting with PHY revision G0, a new scheme is
         * deployed similar to the Starfighter 2 switch with GPHY major
         * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
         * is reserved as well as special value 0x01ff, we have a small
         * heuristic to check for the new GPHY revision and re-arrange things
         * so the GPHY driver is happy.
         */
        gphy_rev = reg & 0xffff;

        /* This is reserved so should require special treatment */
        if (gphy_rev == 0 || gphy_rev == 0x01ff) {
                pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
                return;
        }

        /* This is the good old scheme, just GPHY major, no minor nor patch */
        if ((gphy_rev & 0xf0) != 0)
                priv->gphy_rev = gphy_rev << 8;

        /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
        else if ((gphy_rev & 0xff00) != 0)
                priv->gphy_rev = gphy_rev;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (!(params->flags & GENET_HAS_40BITS))
                pr_warn("GENET does not support 40-bits PA\n");
#endif

        pr_debug("Configuration for version: %d\n"
                 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
                 "BP << en: %2d, BP msk: 0x%05x\n"
                 "HFB count: %2d, QTAQ msk: 0x%05x\n"
                 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
                 "RDMA: 0x%05x, TDMA: 0x%05x\n"
                 "Words/BD: %d\n",
                 priv->version,
                 params->tx_queues, params->tx_bds_per_q,
                 params->rx_queues, params->rx_bds_per_q,
                 params->bp_in_en_shift, params->bp_in_mask,
                 params->hfb_filter_cnt, params->qtag_mask,
                 params->tbuf_offset, params->hfb_offset,
                 params->hfb_reg_offset,
                 params->rdma_offset, params->tdma_offset,
                 params->words_per_bd);
}
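/* GPHY revision heuristic example (illustrative): a register value of 0x00a0
 * matches the old scheme (major in the low byte) and is stored shifted up as
 * 0xa000, while 0x1000 already carries the new-style major in bits 15:8 and
 * is stored as-is.
 */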
static const struct of_device_id bcmgenet_match[] = {
        { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
        { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
        { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
        { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
        { },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
        struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
        struct device_node *dn = pdev->dev.of_node;
        const struct of_device_id *of_id = NULL;
        struct bcmgenet_priv *priv;
        struct net_device *dev;
        const void *macaddr;
        struct resource *r;
        int err = -EIO;
        const char *phy_mode_str;

        /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
        dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
                                 GENET_MAX_MQ_CNT + 1);
        if (!dev) {
                dev_err(&pdev->dev, "can't allocate net device\n");
                return -ENOMEM;
        }

        if (dn) {
                of_id = of_match_node(bcmgenet_match, dn);
                if (!of_id)
                        return -EINVAL;
        }

        priv = netdev_priv(dev);
        priv->irq0 = platform_get_irq(pdev, 0);
        priv->irq1 = platform_get_irq(pdev, 1);
        priv->wol_irq = platform_get_irq(pdev, 2);
        if (!priv->irq0 || !priv->irq1) {
                dev_err(&pdev->dev, "can't find IRQs\n");
                err = -EINVAL;
                goto err;
        }

        if (dn) {
                macaddr = of_get_mac_address(dn);
                if (!macaddr) {
                        dev_err(&pdev->dev, "can't find MAC address\n");
                        err = -EINVAL;
                        goto err;
                }
        } else {
                macaddr = pd->mac_address;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(priv->base)) {
                err = PTR_ERR(priv->base);
                goto err;
        }

        spin_lock_init(&priv->lock);

        SET_NETDEV_DEV(dev, &pdev->dev);
        dev_set_drvdata(&pdev->dev, dev);
        ether_addr_copy(dev->dev_addr, macaddr);
        dev->watchdog_timeo = 2 * HZ;
        dev->ethtool_ops = &bcmgenet_ethtool_ops;
        dev->netdev_ops = &bcmgenet_netdev_ops;

        priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

        /* Set hardware features */
        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

        /* Request the WOL interrupt and advertise suspend if available */
        priv->wol_irq_disabled = true;
        if (priv->wol_irq > 0) {
                err = devm_request_irq(&pdev->dev, priv->wol_irq,
                                       bcmgenet_wol_isr, 0, dev->name, priv);
                if (!err)
                        device_set_wakeup_capable(&pdev->dev, 1);
        }

        /* Set the needed headroom to account for any possible
         * features enabling/disabling at runtime
         */
        dev->needed_headroom += 64;

        netdev_boot_setup_check(dev);

        priv->dev = dev;
        priv->pdev = pdev;
        if (of_id)
                priv->version = (enum bcmgenet_version)of_id->data;
        else
                priv->version = pd->genet_version;

        priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
        if (IS_ERR(priv->clk)) {
                dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
                priv->clk = NULL;
        }

        clk_prepare_enable(priv->clk);

        bcmgenet_set_hw_params(priv);

        /* Mii wait queue */
        init_waitqueue_head(&priv->wq);
        /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
        priv->rx_buf_len = RX_BUF_LENGTH;
        INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

        priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
        if (IS_ERR(priv->clk_wol)) {
                dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
                priv->clk_wol = NULL;
        }

        priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
        if (IS_ERR(priv->clk_eee)) {
                dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
                priv->clk_eee = NULL;
        }

        /* If this is an internal GPHY, power it on now, before UniMAC is
         * brought out of reset as absolutely no UniMAC activity is allowed
         */
        if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
            !strcasecmp(phy_mode_str, "internal"))
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

        err = reset_umac(priv);
        if (err)
                goto err_clk_disable;

        err = bcmgenet_mii_init(dev);
        if (err)
                goto err_clk_disable;

        /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
         * queues, just the ring 16 descriptor-based TX queue).
         */
        netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
        netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

        /* libphy will determine the link state */
        netif_carrier_off(dev);

        /* Turn off the main clock, WOL clock is handled separately */
        clk_disable_unprepare(priv->clk);

        err = register_netdev(dev);
        if (err) {
                bcmgenet_mii_exit(dev);
                goto err;
        }

        return err;

err_clk_disable:
        clk_disable_unprepare(priv->clk);
err:
        free_netdev(dev);
        return err;
}
static int bcmgenet_remove(struct platform_device *pdev)
{
        struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

        dev_set_drvdata(&pdev->dev, NULL);
        unregister_netdev(priv->dev);
        bcmgenet_mii_exit(priv->dev);
        free_netdev(priv->dev);

        return 0;
}
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
        struct net_device *dev = dev_get_drvdata(d);
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int ret = 0;

        if (!netif_running(dev))
                return 0;

        bcmgenet_netif_stop(dev);

        if (!device_may_wakeup(d))
                phy_suspend(priv->phydev);

        netif_device_detach(dev);

        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);

        ret = bcmgenet_dma_teardown(priv);
        if (ret)
                return ret;

        /* Disable MAC transmit. TX DMA must be disabled before this */
        umac_enable_set(priv, CMD_TX_EN, false);

        /* tx reclaim */
        bcmgenet_tx_reclaim_all(dev);
        bcmgenet_fini_dma(priv);

        /* Prepare the device for Wake-on-LAN and switch to the slow clock */
        if (device_may_wakeup(d) && priv->wolopts) {
                ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
                clk_prepare_enable(priv->clk_wol);
        } else if (priv->internal_phy) {
                ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        }

        /* Turn off the clocks */
        clk_disable_unprepare(priv->clk);

        return ret;
}
static int bcmgenet_resume(struct device *d)
{
        struct net_device *dev = dev_get_drvdata(d);
        struct bcmgenet_priv *priv = netdev_priv(dev);
        unsigned long dma_ctrl;
        int ret;

        if (!netif_running(dev))
                return 0;

        /* Turn on the clock */
        ret = clk_prepare_enable(priv->clk);
        if (ret)
                return ret;

        /* If this is an internal GPHY, power it back on now, before UniMAC is
         * brought out of reset as absolutely no UniMAC activity is allowed
         */
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

        bcmgenet_umac_reset(priv);

        ret = init_umac(priv);
        if (ret)
                goto out_clk_disable;

        /* From WOL-enabled suspend, switch to regular clock */
        if (priv->wolopts)
                clk_disable_unprepare(priv->clk_wol);

        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
        bcmgenet_mii_config(priv->dev);

        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

        bcmgenet_set_hw_addr(priv, dev->dev_addr);

        if (priv->wolopts)
                bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

        /* Disable RX/TX DMA and flush TX queues */
        dma_ctrl = bcmgenet_dma_disable(priv);

        /* Reinitialize TDMA and RDMA and SW housekeeping */
        ret = bcmgenet_init_dma(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize DMA\n");
                goto out_clk_disable;
        }

        /* Always enable ring 16 - descriptor ring */
        bcmgenet_enable_dma(priv, dma_ctrl);

        netif_device_attach(dev);

        if (!device_may_wakeup(d))
                phy_resume(priv->phydev);

        if (priv->eee.eee_enabled)
                bcmgenet_eee_enable_set(dev, true);

        bcmgenet_netif_start(dev);

        return 0;

out_clk_disable:
        if (priv->internal_phy)
                bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        clk_disable_unprepare(priv->clk);
        return ret;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
static struct platform_driver bcmgenet_driver = {
        .probe  = bcmgenet_probe,
        .remove = bcmgenet_remove,
        .driver = {
                .name   = "bcmgenet",
                .of_match_table = bcmgenet_match,
                .pm     = &bcmgenet_pm_ops,
        },
};
module_platform_driver(bcmgenet_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");