/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)				"bcmgenet: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
/* Forward declarations */
static void bcmgenet_set_rx_mode(struct net_device *dev);
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};
static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};
/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};
static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
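/* Sketch of the layout implied by the accessors above, assuming a GENET
 * v4-style map: ring 3's TDMA_PROD_INDEX, for example, would live at
 *   base + tdma_offset + TOTAL_DESC * DMA_DESC_SIZE   (descriptor area)
 *        + 3 * DMA_RING_SIZE + 0x0C                   (ring block + reg)
 * while the shared registers (DMA_CTRL, etc.) sit after all the ring
 * blocks, hence the extra DMA_RINGS_SIZE term in bcmgenet_tdma_readl().
 */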
static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}
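/* The ethtool core invokes .begin before and .complete after every
 * ethtool operation, so the pair above keeps the GENET clock running
 * for the register accesses performed by the remaining handlers.
 */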
static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(priv->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
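/* The "64 bytes descriptor" enabled above is the 64B status block that
 * sits in front of each frame in the packet buffer when RBUF_64B_EN is
 * set: bcmgenet_put_tx_csum() fills it in on transmit and
 * bcmgenet_desc_rx() parses and strips it on receive, both keyed off
 * priv->desc_64b_en.
 */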
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	return 0;
}
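/* The RDMA timeout registers count ~8.192us ticks (125MHz / 1024), so
 * the "* 8192 / 1000" above converts ticks to microseconds; a raw value
 * of 10, for example, reads back as roughly 81us.
 */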
static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}
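/* From userspace these knobs are typically driven as, for example
 * (interface name illustrative):
 *   ethtool -C eth0 rx-usecs 50 rx-frames 16 tx-frames 16
 * which lands in rx_coalesce_usecs, rx_max_coalesced_frames and
 * tx_max_coalesced_frames respectively.
 */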
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
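/* The tables built from the macros above back `ethtool -S`: each entry
 * records a counter's name, its width, and where it lives (the netdev
 * stats, the driver's software mirror in struct bcmgenet_priv, or a raw
 * UMAC register) so the stats can be harvested generically below.
 */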
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
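/* The running 'j' offset works because the software MIB mirror is
 * declared in the same order as the hardware counters (see the comment
 * above bcmgenet_gstrings_stats): summing stat_sizeof walks linearly
 * from UMAC_MIB_START, with the 0xC hardware gaps skipped through the
 * switch fall-throughs above.
 */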
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	dev->netdev_ops->ndo_get_stats(dev);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27Mhz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
};
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv))
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
			 EXT_ENERGY_DET_MASK);
		if (GENET_IS_V5(priv)) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		bcmgenet_mii_reset(priv->dev);
		break;

	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}
}
1200 static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1202 struct bcmgenet_priv *priv = netdev_priv(dev);
1204 if (!netif_running(dev))
1210 return phy_mii_ioctl(priv->phydev, rq, cmd);
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}
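/* bcmgenet_put_txcb() is the exact inverse of bcmgenet_get_txcb(); the
 * transmit path relies on it to back out control blocks it has already
 * claimed when a DMA mapping fails partway through a fragmented skb.
 */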
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
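/* The default ring (16) is signalled through the INTRL2_0 block while
 * the priority rings each own a bit in INTRL2_1; the helpers above only
 * mask or unmask the source, acknowledging is done separately with
 * INTRL2_CPU_CLEAR writes in the reclaim/rx paths.
 */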
/* Simple helper to free a transmit control block's resources
 * Returns an skb when the last transmit control block associated with the
 * skb is freed. The skb should be freed by the caller if necessary.
 */
static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;

	if (skb) {
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
					 dma_unmap_len(cb, dma_len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
				       dma_unmap_len(cb, dma_len),
				       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);

		if (cb == GENET_CB(skb)->last_cb)
			return skb;

	} else if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return NULL;
}
/* Simple helper to free a receive control block's resources */
static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;
	cb->skb = NULL;

	if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return skb;
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int txbds_processed = 0;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int txbds_ready;
	unsigned int c_index;
	struct sk_buff *skb;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	else
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
					 INTRL2_CPU_CLEAR);

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
		if (skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(skb)->bytes_sent;
			dev_consume_skb_any(skb);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = c_index;

	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return txbds_processed;
}
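/* The "(c_index - ring->c_index) & DMA_C_INDEX_MASK" computation above
 * is wrap-safe: the hardware consumer index is a free-running 16-bit
 * counter, so the masked subtraction still yields the number of newly
 * completed descriptors across a rollover (e.g. 0x0003 - 0xfffe = 5).
 */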
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;
	struct netdev_queue *txq;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
		netif_tx_wake_queue(txq);
	}
	spin_unlock_irqrestore(&ring->lock, flags);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
static void bcmgenet_hide_tsb(struct sk_buff *skb)
{
	__skb_pull(skb, sizeof(struct status_64));
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcmgenet_tx_ring *ring = NULL;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	dma_addr_t mapping;
	unsigned int size;
	skb_frag_t *frag;
	u32 len_stat;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = GENET_MAX_MQ_CNT + 1;
	else
		index -= 1;

	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= (nr_frags + 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
			netdev_err(dev,
				   "%s: tx ring %d full when queue %d awake\n",
				   __func__, index, ring->queue);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	for (i = 0; i <= nr_frags; i++) {
		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

		if (unlikely(!tx_cb_ptr))
			BUG();

		if (!i) {
			/* Transmit single SKB or head of fragment list */
			GENET_CB(skb)->first_cb = tx_cb_ptr;
			size = skb_headlen(skb);
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			/* xmit fragment */
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		ret = dma_mapping_error(kdev, mapping);
		if (ret) {
			priv->mib.tx_dma_failed++;
			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
			ret = NETDEV_TX_OK;
			goto out_unmap_frags;
		}
		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
		dma_unmap_len_set(tx_cb_ptr, dma_len, size);

		tx_cb_ptr->skb = skb;

		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);

		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
		 * will need to restore software padding of "runt" packets
		 */
		if (!i) {
			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				len_stat |= DMA_TX_DO_CSUM;
		}
		if (i == nr_frags)
			len_stat |= DMA_EOP;

		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
	}

	GENET_CB(skb)->last_cb = tx_cb_ptr;

	bcmgenet_hide_tsb(skb);
	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;

out_unmap_frags:
	/* Back up for failed control block mapping */
	bcmgenet_put_txcb(priv, ring);

	/* Unmap successfully mapped control blocks */
	while (i-- > 0) {
		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
	}

	dev_kfree_skb(skb);
	goto out;
}
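/* Deferring the TDMA_PROD_INDEX doorbell while skb->xmit_more is set
 * lets a burst of queued packets be published to hardware with a single
 * register write; the last skb of the burst (or a stopped queue)
 * flushes the producer index update.
 */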
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = bcmgenet_free_rx_cb(kdev, cb);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
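/* Note the swap semantics above: a descriptor only surrenders its old
 * skb once a freshly allocated and DMA-mapped replacement is in place,
 * so the ring can never run dry; on allocation or mapping failure the
 * old buffer stays put and the caller drops the packet instead.
 */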
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index, mask;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX) {
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	} else {
		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
		bcmgenet_intrl2_1_writel(priv,
					 mask,
					 INTRL2_CPU_CLEAR);
	}

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		ring->errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;
	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			ring->dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			ring->errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
						DMA_RX_OV |
						DMA_RX_NO |
						DMA_RX_LG |
						DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/*Finish setting up the received SKB and send it to the kernel*/
		skb->protocol = eth_type_trans(skb, priv->dev);
		ring->packets++;
		ring->bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}
/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ring->int_enable(ring);
	}

	return work_done;
}
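/* Per the NAPI contract, this ring's interrupt stays masked for as long
 * as polls keep exhausting their budget; it is only re-enabled once a
 * poll completes early, i.e. when the ring has been drained.
 */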
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_consume_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct sk_buff *skb;
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
		if (skb)
			dev_consume_skb_any(skb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplugged event for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
			int0_enable |= UMAC_IRQ_PHY_DET_R;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg;
	u32 int0_enable = 0;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);

	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}
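/* The pointer registers above are programmed in DMA words rather than
 * descriptors: start_ptr * words_per_bd converts a control-block index
 * into the word offset the DMA engine expects (words_per_bd is assumed
 * to be 2 without the 40-bit address extension and 3 with it, per
 * DMA_DESC_ADDRESS_HI).
 */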
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
}
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
	u32 int1_enable = 0;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
		int1_enable |= (1 << i);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
	u32 int1_disable = 0xffff;
	struct bcmgenet_tx_ring *ring;

	bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Initialize Tx NAPI */
	bcmgenet_init_tx_napi(priv);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
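/* Note on the dma_priority[] packing in bcmgenet_init_tx_queues() above,
 * assuming the usual definitions DMA_PRIO_REG_INDEX(q) = q / 6 and
 * DMA_PRIO_REG_SHIFT(q) = (q % 6) * 5: each DMA_PRIORITY_n register holds
 * six 5-bit priority fields. A worked example with 4 Tx queues: queues
 * 0-3 land in DMA_PRIORITY_0 at bit offsets 0, 5, 10 and 15 with
 * priorities 0-3, and queue 16 (16 / 6 = 2) lands in DMA_PRIORITY_2 at
 * bit offset (16 % 6) * 5 = 20 with priority 4.
 */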
static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
}
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
	u32 int1_enable = 0;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_enable(&ring->napi);
		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_enable(&ring->napi);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
	u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
	struct bcmgenet_rx_ring *ring;

	bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Initialize Rx NAPI */
	bcmgenet_init_rx_napi(priv);

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}
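/* Descriptor budget example for the split above (hypothetical values,
 * assuming TOTAL_DESC = 256): with rx_queues = 8 and rx_bds_per_q = 16,
 * the priority queues would consume 128 descriptors and
 * GENET_Q16_RX_BD_CNT would leave 256 - 128 = 128 for the default queue.
 * The hw_params table later in this file keeps rx_queues = 0 for all
 * versions, in which case queue 16 owns all 256 descriptors.
 */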
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;
	u32 dma_ctrl;
	int i;

	/* Disable TDMA to stop add more frames in TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	return ret;
}
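/* Timing note for the polling loops above: assuming the usual
 * DMA_TIMEOUT_VAL of 5000 and one udelay(1) per iteration, each disable
 * poll gives the hardware roughly 5 ms to report DMA_DISABLED before the
 * warning fires and -ETIMEDOUT is latched into the return value.
 */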
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	struct netdev_queue *txq;
	struct sk_buff *skb;
	struct enet_cb *cb;
	int i;

	bcmgenet_fini_rx_napi(priv);
	bcmgenet_fini_tx_napi(priv);

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
		if (skb)
			dev_kfree_skb(skb);
	}

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
		netdev_tx_reset_queue(txq);
	}

	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
	netdev_tx_reset_queue(txq);

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
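/* Sizing example for the bd_addr assignments in bcmgenet_init_dma() above,
 * assuming the usual TOTAL_DESC = 256: with words_per_bd = 2 (no 40-bit
 * PA) each descriptor occupies 8 bytes, so the on-chip descriptors span
 * 2 KiB starting at rdma_offset/tdma_offset, with the per-ring DMA
 * registers following immediately after that window.
 */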
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	unsigned long flags;
	unsigned int status;
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	spin_lock_irqsave(&priv->lock, flags);
	status = priv->irq0_stat;
	priv->irq0_stat = 0;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (status & UMAC_IRQ_MPD_R) {
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	if (status & UMAC_IRQ_PHY_DET_R &&
	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
		phy_init_hw(priv->dev->phydev);
		genphy_config_aneg(priv->dev->phydev);
	}

	/* Link UP/DOWN event */
	if (status & UMAC_IRQ_LINK_EVENT)
		phy_mac_interrupt(priv->phydev,
				  !!(status & UMAC_IRQ_LINK_UP));
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int index, status;

	/* Read irq status */
	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, status);

	/* Check Rx priority queue interrupts */
	for (index = 0; index < priv->hw_params->rx_queues; index++) {
		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
			continue;

		rx_ring = &priv->rx_rings[index];

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
		}
	}

	/* Check Tx priority queue interrupts */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(status & BIT(index)))
			continue;

		tx_ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
		}
	}

	return IRQ_HANDLED;
}
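/* INTRL2_1 bit layout assumed by the loops above: one pending bit per Tx
 * priority queue starting at bit 0, and one per Rx priority queue
 * starting at UMAC_IRQ1_RX_INTR_SHIFT (bit 16), so e.g. Rx queue 2 maps
 * to BIT(UMAC_IRQ1_RX_INTR_SHIFT + 2) = BIT(18).
 */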
/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int status;
	unsigned long flags;

	/* Read irq status */
	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", status);

	if (status & UMAC_IRQ_RXDMA_DONE) {
		rx_ring = &priv->rx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
		}
	}

	if (status & UMAC_IRQ_TXDMA_DONE) {
		tx_ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
		}
	}

	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_EVENT |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		wake_up(&priv->wq);
	}

	/* all other interested interrupts handled in bottom half */
	status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_MPD_R | UMAC_IRQ_PHY_DET_R);
	if (status) {
		/* Save irq status for bottom-half processing. */
		spin_lock_irqsave(&priv->lock, flags);
		priv->irq0_stat |= status;
		spin_unlock_irqrestore(&priv->lock, flags);

		schedule_work(&priv->bcmgenet_irq_work);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcmgenet_poll_controller(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Invoke the main RX/TX interrupt handler */
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);

	/* And the interrupt handler for RX/TX priority queues */
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
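/* Worked example for the register packing above, using a made-up address
 * 00:10:18:aa:bb:cc: UMAC_MAC0 is written with 0x001018aa (the first four
 * octets, most significant first) and UMAC_MAC1 with 0x0000bbcc (the
 * remaining two octets in its low half).
 */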
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;
	unsigned int i;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
			priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_clear(priv);
}
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	bcmgenet_set_rx_mode(dev);
	bcmgenet_enable_rx_napi(priv);
	bcmgenet_enable_tx_napi(priv);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	netif_tx_start_all_queues(dev);

	/* Monitor link interrupts now */
	bcmgenet_link_intr_enable(priv);

	phy_start(priv->phydev);
}
static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	clk_prepare_enable(priv->clk);

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	/* HFB init */
	bcmgenet_hfb_init(priv);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	ret = bcmgenet_mii_probe(dev);
	if (ret) {
		netdev_err(dev, "failed to connect to PHY\n");
		goto err_irq1;
	}

	bcmgenet_netif_start(dev);

	return 0;

err_irq1:
	free_irq(priv->irq1, priv);
err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	phy_stop(priv->phydev);
	bcmgenet_intr_disable(priv);
	bcmgenet_disable_rx_napi(priv);
	bcmgenet_disable_tx_napi(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA disabled must be done before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	clk_disable_unprepare(priv->clk);

	return ret;
}
static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = ring->priv;
	u32 p_index, c_index, intsts, intmsk;
	struct netdev_queue *txq;
	unsigned int free_bds;
	unsigned long flags;
	bool txq_stopped;

	if (!netif_msg_tx_err(priv))
		return;

	txq = netdev_get_tx_queue(priv->dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->index == DESC_INDEX) {
		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
	} else {
		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = 1 << ring->index;
	}
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	txq_stopped = netif_tx_queue_stopped(txq);
	free_bds = ring->free_bds;
	spin_unlock_irqrestore(&ring->lock, flags);

	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
		  "TX queue status: %s, interrupts: %s\n"
		  "(sw)free_bds: %d (sw)size: %d\n"
		  "(sw)p_index: %d (hw)p_index: %d\n"
		  "(sw)c_index: %d (hw)c_index: %d\n"
		  "(sw)clean_p: %d (sw)write_p: %d\n"
		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
		  ring->index, ring->queue,
		  txq_stopped ? "stopped" : "active",
		  intsts & intmsk ? "enabled" : "disabled",
		  free_bds, ring->size,
		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
		  ring->c_index, c_index & DMA_C_INDEX_MASK,
		  ring->clean_ptr, ring->write_ptr,
		  ring->cb_ptr, ring->end_ptr);
}
static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	unsigned int q;

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);

	bcmgenet_tx_reclaim_all(dev);

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		int1_enable |= (1 << q);

	int0_enable = UMAC_IRQ_TXDMA_DONE;

	/* Re-enable TX interrupts if disabled */
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	netif_trans_update(dev);

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
#define MAX_MDF_FILTER	17

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i)
{
	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	*i += 2;
}
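/* MDF packing example, using the same made-up address 00:10:18:aa:bb:cc:
 * the first MDF word receives 0x00000010 (addr[0] << 8 | addr[1]) and the
 * second 0x18aabbcc. Each filter thus consumes two consecutive
 * UMAC_MDF_ADDR words, which is why *i advances by 2 per address.
 */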
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, nfilter;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Number of filters needed */
	nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;

	/*
	 * Turn on promiscuous mode for three scenarios
	 * 1. IFF_PROMISC flag is set
	 * 2. IFF_ALLMULTI flag is set
	 * 3. The number of filters needed exceeds the number of filters
	 *    supported by the hardware.
	 */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
	    (nfilter > MAX_MDF_FILTER)) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* update MDF filter */
	i = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
	/* my own address.*/
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);

	/* Unicast */
	netdev_for_each_uc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);

	/* Multicast */
	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);

	/* Enable filters */
	reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}
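/* Filter-enable example for the GENMASK() write above: with one unicast
 * and one multicast address configured, nfilter = 4 (broadcast + own
 * address + 2), so reg = GENMASK(16, 13), setting the top four bits of
 * the 17-bit enable field in UMAC_MDF_CTRL.
 */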
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long tx_bytes = 0, tx_packets = 0;
	unsigned long rx_bytes = 0, rx_packets = 0;
	unsigned long rx_errors = 0, rx_dropped = 0;
	struct bcmgenet_tx_ring *tx_ring;
	struct bcmgenet_rx_ring *rx_ring;
	unsigned int q;

	for (q = 0; q < priv->hw_params->tx_queues; q++) {
		tx_ring = &priv->tx_rings[q];
		tx_bytes += tx_ring->bytes;
		tx_packets += tx_ring->packets;
	}
	tx_ring = &priv->tx_rings[DESC_INDEX];
	tx_bytes += tx_ring->bytes;
	tx_packets += tx_ring->packets;

	for (q = 0; q < priv->hw_params->rx_queues; q++) {
		rx_ring = &priv->rx_rings[q];

		rx_bytes += rx_ring->bytes;
		rx_packets += rx_ring->packets;
		rx_errors += rx_ring->errors;
		rx_dropped += rx_ring->dropped;
	}
	rx_ring = &priv->rx_rings[DESC_INDEX];
	rx_bytes += rx_ring->bytes;
	rx_packets += rx_ring->packets;
	rx_errors += rx_ring->errors;
	rx_dropped += rx_ring->dropped;

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_errors = rx_errors;
	dev->stats.rx_missed_errors = rx_errors;
	dev->stats.rx_dropped = rx_dropped;

	return &dev->stats;
}
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcmgenet_poll_controller,
#endif
	.ndo_get_stats		= bcmgenet_get_stats,
};
/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
			 GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V5] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 6)
		major = 5;
	else if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved as well as special value 0x01ff, we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
	gphy_rev = reg & 0xffff;

	if (GENET_IS_V5(priv)) {
		/* The EPHY revision should come from the MDIO registers of
		 * the PHY not from GENET.
		 */
		if (gphy_rev != 0) {
			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
				gphy_rev);
		}
	/* This is reserved so should require special treatment */
	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	/* This is the good old scheme, just GPHY major, no minor nor patch */
	} else if ((gphy_rev & 0xf0) != 0) {
		priv->gphy_rev = gphy_rev << 8;
	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	} else if ((gphy_rev & 0xff00) != 0) {
		priv->gphy_rev = gphy_rev;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		"TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		"BP << en: %2d, BP msk: 0x%05x\n"
		"HFB count: %2d, QTAQ msk: 0x%05x\n"
		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		"RDMA: 0x%05x, TDMA: 0x%05x\n"
		"Words/BD: %d\n",
		priv->version,
		params->tx_queues, params->tx_bds_per_q,
		params->rx_queues, params->rx_bds_per_q,
		params->bp_in_en_shift, params->bp_in_mask,
		params->hfb_filter_cnt, params->qtag_mask,
		params->tbuf_offset, params->hfb_offset,
		params->hfb_reg_offset,
		params->rdma_offset, params->tdma_offset,
		params->words_per_bd);
}
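/* GPHY revision rearrangement examples for the heuristic above: an
 * old-scheme value such as 0x0040 (major in the low byte) is shifted up
 * to 0x4000 before being handed to the PHY driver, while a new-scheme
 * value such as 0x1000 (major already in bits 15:8, rolling over at
 * 0x10 = rev G0) is stored unchanged.
 */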
static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
	{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;
	const char *phy_mode_str;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	if (dn) {
		of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id)
			return -EINVAL;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	if (dn) {
		macaddr = of_get_mac_address(dn);
		if (!macaddr) {
			dev_err(&pdev->dev, "can't find MAC address\n");
			err = -EINVAL;
			goto err;
		}
	} else {
		macaddr = pd->mac_address;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	spin_lock_init(&priv->lock);

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	if (priv->wol_irq > 0) {
		err = devm_request_irq(&pdev->dev, priv->wol_irq,
				       bcmgenet_wol_isr, 0, dev->name, priv);
		if (!err)
			device_set_wakeup_capable(&pdev->dev, 1);
	}

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	if (of_id)
		priv->version = (enum bcmgenet_version)of_id->data;
	else
		priv->version = pd->genet_version;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
		priv->clk = NULL;
	}

	clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
		priv->clk_wol = NULL;
	}

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	/* If this is an internal GPHY, power it on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
	    !strcasecmp(phy_mode_str, "internal"))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
	 * just the ring 16 descriptor based TX
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err) {
		bcmgenet_mii_exit(dev);
		goto err;
	}

	return err;

err_clk_disable:
	clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}
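/* Queue accounting note for bcmgenet_probe() above: alloc_etherdev_mqs()
 * reserves GENET_MAX_MQ_CNT + 1 = 5 Tx and Rx queues up front, and
 * netif_set_real_num_tx/rx_queues() then trims that to the per-version
 * hw_params counts plus one for the ring 16 default queue.
 */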
static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	if (!device_may_wakeup(d))
		phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA disabled must be done before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	} else if (priv->internal_phy) {
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return ret;
}
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	genphy_config_aneg(dev->phydev);
	bcmgenet_mii_config(priv->dev, false);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	if (!device_may_wakeup(d))
		phy_resume(priv->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: mdio-bcm-unimac");