// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll",
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

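/* Read-modify-write helper: clear @mask, set @set and return the new
 * register value.
 */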
static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);
	return val;
}

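/* PHY registers are reached indirectly through the PHY Indirect Access
 * Control (PHY_IAC) register: wait until the previous transaction has
 * completed, issue the command, then poll for completion again.
 */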
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

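/* TRGMII clock setup differs per SoC: MT7621 only switches an ETHSYS PLL
 * mux, while the generic path below reprograms the trgpll rate and, for
 * MT7623, the pad/ODT timing.
 */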
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

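/* phylink MAC callbacks. mtk_mac_config() routes the GMAC to the proper
 * pin/SerDes path (RGMII, SGMII/802.3z or GEPHY) and programs ETHSYS and
 * SGMIISYS before touching the per-MAC control register (MCR).
 */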
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new, sid, i;
	int val, ge_mode, err = 0;

	/* MT76x8 has no hardware settings between for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			fallthrough;
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path GMAC to SGMII will be enabled once the SGMIISYS is
		 * being setup done.
		 */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS be mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
							 state);
		else if (phylink_autoneg_inband(mode))
			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);

		if (err)
			goto init_err;

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
		   MAC_MCR_RX_FIFO_CLR_DIS;

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}

static void mtk_mac_pcs_get_state(struct phylink_config *config,
				  struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));

	state->link = (pmsr & MAC_MSR_LINK);
	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;

	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case MAC_MSR_SPEED_100:
		state->speed = SPEED_100;
		break;
	case MAC_MSR_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (pmsr & MAC_MSR_RX_FC)
		state->pause |= MLO_PAUSE_RX;
	if (pmsr & MAC_MSR_TX_FC)
		state->pause |= MLO_PAUSE_TX;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		 MAC_MCR_FORCE_RX_FC);

	/* Configure speed */
	switch (speed) {
	case SPEED_2500:
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	/* Configure duplex */
	if (duplex == DUPLEX_FULL)
		mcr |= MAC_MCR_FORCE_DPX;

	/* Configure pause modes - phylink will avoid these for half duplex */
	if (tx_pause)
		mcr |= MAC_MCR_FORCE_TX_FC;
	if (rx_pause)
		mcr |= MAC_MCR_FORCE_RX_FC;

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface)))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		break;
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		fallthrough;
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_pcs_get_state = mtk_mac_pcs_get_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

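/* Register the MDIO bus described by the "mdio-bus" child node of the
 * controller.
 */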
static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

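/* The TX/RX interrupt mask registers are shared between hard-IRQ and NAPI
 * context, so all updates are done under dedicated spinlocks.
 */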
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

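/* MAC addresses are programmed as two registers: the high 16 bits and the
 * low 32 bits, either in GDMA or (on MT7628) in the SDM block.
 */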
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	struct mtk_eth *eth = mac->hw;

	u64_stats_update_begin(&hw_stats->syncp);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
	} else {
		unsigned int offs = hw_stats->reg_offset;
		u64 stats;

		hw_stats->rx_bytes += mtk_r32(mac->hw,
					      MTK_GDM1_RX_GBCNT_L + offs);
		stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
		if (stats)
			hw_stats->rx_bytes += (stats << 32);
		hw_stats->rx_packets +=
			mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
		hw_stats->rx_overflow +=
			mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
		hw_stats->rx_fcs_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
		hw_stats->rx_short_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
		hw_stats->rx_long_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
		stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

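/* Snapshot a RX descriptor into @rxd; READ_ONCE() forces a single read of
 * each descriptor word that the hardware may still be updating.
 */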
static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
	unsigned long data;

	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
				get_order(size));

	return (void *)data;
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

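/* Helpers translating between QDMA bus addresses, descriptor pointers and
 * ring indices; the PDMA shadow ring (ring->dma_pdma) mirrors the QDMA
 * ring entry for entry.
 */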
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       struct mtk_tx_dma *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
{
	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}

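/* Map an skb (linear head plus fragments) onto TX descriptors. On QDMA
 * hardware the descriptors form a linked list through txd2; on PDMA two
 * buffers are packed per descriptor, which is what the (idx & 1)
 * bookkeeping in setup_tx_buf() handles.
 */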
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);
	nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;
			bool new_desc = true;

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
				     frag_map_size, k++);

			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

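/* With HW LRO enabled, received packets may land on any of the RX rings,
 * so pick the first ring that holds a completed descriptor.
 */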
static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
				RX_DMA_FPORT_MASK;
			mac--;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		if (ring->frag_size <= PAGE_SIZE)
			new_data = napi_alloc_frag(ring->frag_size);
		else
			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & eth->rx_dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    (trxd.rxd2 & RX_DMA_VTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}

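/* TX completion (QDMA): walk the descriptors the hardware has released,
 * i.e. those between the CPU and DMA ring pointers, unmap the buffers and
 * credit the owning netdev.
 */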
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	return budget;
}

static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[0] += skb->len;
			done[0]++;
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf);

		desc = &ring->dma[cpu];
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;

	return budget;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
	else
		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, eth->tx_int_status_reg);
		mask = mtk_r32(eth, eth->tx_int_mask_reg);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, eth->tx_int_status_reg);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}

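/* Allocate the TX ring: a coherent array of descriptors linked through
 * txd2, plus, on PDMA-only SoCs, a shadow ring holding the real PDMA
 * descriptors.
 */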
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
						    &ring->phys_pdma,
						    GFP_ATOMIC);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_CRX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_DRX_PTR);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			MTK_QTX_CFG(0));
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
				  ring->dma_pdma,
				  ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}

static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u32 offset = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
		offset = 0x1000;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		if (ring->frag_size <= PAGE_SIZE)
			ring->data[i] = netdev_alloc_frag(ring->frag_size);
		else
			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			ring->dma[i].rxd2 = RX_DMA_LSO;
		else
			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

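/* HW LRO setup: the RX rings above ring 0 auto-learn TCP flows and
 * aggregate them in hardware; the AGE/AGG timers and refresh interval
 * below are in units of 20us, as the comments note.
 */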
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	u32 val;
	int i;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int i;
	int cnt = 0;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
		return -EINVAL;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;

		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
			if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		} else {
			if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		}

		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, MTK_QDMA_FC_THRES);
		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
	}

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0]);
	mtk_rx_clean(eth, &eth->rx_ring_qdma);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i]);
	}

	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
			mtk_handle_irq_rx(irq, _eth);
	}
	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
			mtk_handle_irq_tx(irq, _eth);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth,
			MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
			MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
			MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
			MTK_RX_BT_32DWORDS,
			MTK_QDMA_GLO_CFG);

		mtk_w32(eth,
			MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			MTK_PDMA_GLO_CFG);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			MTK_PDMA_GLO_CFG);
	}

	return 0;
}

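/* Point every GDMA forward port at the PDMA (or drop all traffic, per
 * @config), enable RX checksum offload, then pulse the PSE reset.
 */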
static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
{
	int i;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* default setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		val |= config;

		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}
	/* Reset and enable PSE */
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err) {
			phylink_disconnect_phy(mac->phylink);
			return err;
		}

		mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	phylink_start(mac->phylink);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_stop(mac->phylink);

	netif_tx_disable(dev);

	phylink_disconnect_phy(mac->phylink);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val, ret;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	ret = mtk_clk_enable(eth);
	if (ret)
		goto err_disable_pm;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* enable interrupt delay for RX */
		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	/* Non-MT7628 handling... */
	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Its own MCR would be set
	 * up with the more appropriate value when mtk_mac_config call is being
	 * invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));

	/* Indicates CDM to parse the MTK special tag from CPU
	 * which also is working out for untag packets.
	 */
	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);

	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX */
	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return 0;
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

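/* Illustrative sketch, not part of the driver: how the pending work above is
 * assumed to be kicked. In this driver the trigger is the netdev TX-timeout
 * handler; schedule_work() is safe from that context, and the MTK_RESETTING
 * bit keeps a reset already in flight from being re-armed.
 * mtk_trigger_reset_sketch() is a hypothetical helper for illustration only.
 */
static void __maybe_unused mtk_trigger_reset_sketch(struct mtk_eth *eth)
{
	if (!test_bit(MTK_RESETTING, &eth->state))
		schedule_work(&eth->pending_work);
}
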
static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	if (!mac->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(mac->phylink);
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

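/* Illustrative sketch, not part of the driver: the writer side that the
 * u64_stats retry loop above pairs with. mtk_stats_update_mac() is assumed
 * to bracket its counter updates with u64_stats_update_begin()/end() so a
 * reader retries instead of seeing a torn 64-bit value on 32-bit SoCs.
 * mtk_stats_writer_sketch() is a hypothetical helper for illustration only.
 */
static void __maybe_unused mtk_stats_writer_sketch(struct mtk_hw_stats *hw_stats)
{
	u64_stats_update_begin(&hw_stats->syncp);
	hw_stats->rx_packets++;		/* example counter update */
	hw_stats->rx_bytes += 64;	/* example counter update */
	u64_stats_update_end(&hw_stats->syncp);
}
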
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};

static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	phy_interface_t phy_mode;
	struct phylink *phylink;
	struct mtk_mac *mac;
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	err = of_get_phy_mode(np, &phy_mode);
	if (err) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

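/* Illustrative sketch, not part of this excerpt: how the phylink instance
 * created in mtk_add_mac() is assumed to be consumed at ifup time. The real
 * driver does the equivalent in mtk_open(); mtk_phylink_connect_sketch() is
 * a hypothetical helper for illustration only.
 */
static int __maybe_unused mtk_phylink_connect_sketch(struct mtk_mac *mac)
{
	int err;

	/* bind the PHY described in the device tree to this MAC */
	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err)
		return err;

	phylink_start(mac->phylink);	/* begin link management */
	return 0;
}
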
static int mtk_probe(struct platform_device *pdev)
{
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
	} else {
		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
		eth->ip_align = NET_IP_ALIGN;
	} else {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
	}

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
					  GFP_KERNEL);
		if (!eth->sgmii)
			return -ENOMEM;

		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
				     eth->soc->ana_rgc3);
		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

	for (i = 0; i < 3; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
			eth->irq[i] = eth->irq[0];
		else
			eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[0],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[1],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		err = devm_request_irq(eth->dev, eth->irq[2],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), eth);
	}
	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}
		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

static const struct mtk_soc_data mt2701_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data rt5350_data = {
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");