/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
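
/* Each entry maps an ethtool counter name to the matching u64 slot in
 * struct mtk_hw_stats, so the counters can be copied out by index.
 */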

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp1", "gp2", "trgpll"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}
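
/* MDIO access goes through the PHY indirect access control (IAC)
 * register; the helper below spins until the previous transaction has
 * completed before a new read or write is issued.
 */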

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
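
/* Called by phylib on every PHY state change: rebuilds the MAC control
 * register (speed, duplex, resolved flow control) and mirrors the PHY
 * link state into the netdev carrier state.
 */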

static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	switch (dev->phydev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->id == 0 && !mac->trgmii)
		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);

	if (dev->phydev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (dev->phydev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (dev->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (dev->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (dev->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (dev->phydev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	struct phy_device *phydev;
	int phy_mode;

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	return 0;
}

static int mtk_phy_connect(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth;
	struct device_node *np;
	u32 val;

	eth = mac->hw;
	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_TRGMII:
		mac->trgmii = true;
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		mac->ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		mac->ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		mac->ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		mac->ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* couple phydev to net_device */
	if (mtk_phy_connect_node(eth, mac, np))
		goto err_phy;

	dev->phydev->autoneg = AUTONEG_ENABLE;
	dev->phydev->speed = 0;
	dev->phydev->duplex = 0;

	if (of_phy_is_fixed_link(mac->of_node))
		dev->phydev->supported |=
		SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
				  SUPPORTED_Asym_Pause;
	dev->phydev->advertising = dev->phydev->supported |
				   ADVERTISED_Autoneg;
	phy_start_aneg(dev->phydev);

	of_node_put(np);

	return 0;

err_phy:
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	of_node_put(np);
	dev_err(eth->dev, "%s: invalid phy\n", __func__);
	return -EINVAL;
}
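
/* Register an MDIO bus backed by the "mdio-bus" child node so that
 * phylib can enumerate the PHYs hanging off the frame engine.
 */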

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_irq_disable(struct mtk_eth *eth,
				   unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val & ~mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static inline void mtk_irq_enable(struct mtk_eth *eth,
				  unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val | mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;

	return storage;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
	unsigned long data;

	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
				get_order(size));

	return (void *)data;
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
				(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}
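
/* Translate between the QDMA view of a descriptor (its DMA address) and
 * the CPU view (its offset inside the coherent ring allocation).
 */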

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(eth->dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(eth->dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
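
/* Map an skb for transmission: the linear head is mapped with
 * dma_map_single(), every page fragment with skb_frag_dma_map(), and the
 * pieces are chained through the QDMA descriptors via txd2.
 */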

static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}
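
/* Worst-case descriptor count for an skb: GSO fragments may have to be
 * split into MTK_TX_DMA_BUF_LEN sized chunks.
 */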

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}
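
/* RX poll loop: each received buffer is replaced with a freshly mapped
 * one before the filled buffer is turned into an skb, so the ring never
 * runs empty; the updated CPU index is written back once per poll.
 */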

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		if (ring->frag_size <= PAGE_SIZE)
			new_data = napi_alloc_frag(ring->frag_size);
		else
			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    (trxd.rxd2 & RX_DMA_VTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}
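
/* Reclaim transmitted descriptors between the CPU and DMA release
 * pointers, credit the bytes/packets back to the owning netdev and wake
 * the queues once enough descriptors are free again.
 */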

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	static int condition;
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
		      TX_DMA_FPORT_MASK;
		mac--;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		skb = tx_buf->skb;
		if (!skb) {
			condition = 1;
			break;
		}

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}
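
/* The TX ring is a fixed-size array whose descriptors are linked into a
 * circle through txd2; two descriptors are kept in reserve, hence the
 * initial free count of MTK_DMA_SIZE - 2.
 */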

static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sz,
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		goto no_tx_mem;

	memset(ring->dma, 0, MTK_DMA_SIZE * sz);
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}
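
/* Ring 0 carries ordinary traffic sized for ETH_DATA_LEN; the remaining
 * rings are only used for HW LRO and get MTK_MAX_LRO_RX_LENGTH buffers.
 */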

static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
	int rx_data_len, rx_dma_size;
	int i;

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		if (ring->frag_size <= PAGE_SIZE)
			ring->data[i] = netdev_alloc_frag(ring->frag_size);
		else
			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
{
	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}
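
/* HW LRO setup: rings 1..3 are put into auto-learn mode and the global
 * aggregation limits (age/agg timers, counts, bandwidth threshold) are
 * programmed before LRO is enabled.
 */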

static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int i;
	int cnt = 0;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
		return -EINVAL;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EFAULT;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, 0);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, i);
	}

	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
#endif
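
/* Bring up both DMA engines: QDMA drives transmit, PDMA receive. */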

static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
		MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
		MTK_QDMA_GLO_CFG);

	mtk_w32(eth,
		MTK_RX_DMA_EN | rx_2b_offset |
		MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
		MTK_PDMA_GLO_CFG);

	return 0;
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
		mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	}
	atomic_inc(&eth->dma_refcnt);

	phy_start(dev->phydev);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(dev->phydev);

	/* only shutdown DMA if this is the last user */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}
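
/* Assert the requested ethsys reset bits, then release them again after
 * a short delay.
 */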

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
	clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
	clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
	clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i])
			continue;
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
		val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
	}
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* Set GE2 driving and slew rate */
	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

	/* set GE2 TDSEL */
	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

	/* set GE2 TUNE */
	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

	/* Set linkdown as the default for each GMAC. Its own MCR would be set
	 * up with the more appropriate value when mtk_phy_link_adjust call is
	 * being invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, 0, MTK_MAC_MCR(i));

	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
	clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
	clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
	clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_RANDOM;
	}

	return mtk_phy_connect(dev);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(dev->phydev);
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		err = phy_init_hw(eth->netdev[i]->phydev);
		if (err)
			dev_err(eth->dev, "%s: PHY init failed.\n",
				eth->netdev[i]->name);
	}

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return genphy_restart_aneg(dev->phydev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	err = genphy_update_link(dev->phydev);
	if (err)
		return ethtool_op_get_link(dev);

	return dev->phydev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};

static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = MTK_HW_FEATURES;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
{
	u32 val[2], id[4];

	regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
	regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);

	id[3] = ((val[0] >> 16) & 0xff) - '0';
	id[2] = ((val[0] >> 24) & 0xff) - '0';
	id[1] = (val[1] & 0xff) - '0';
	id[0] = ((val[1] >> 8) & 0xff) - '0';

	*chip_id = (id[3] * 1000) + (id[2] * 100) +
		   (id[1] * 10) + id[0];

	if (!(*chip_id)) {
		dev_err(eth->dev, "failed to get chip id\n");
		return -ENODEV;
	}

	dev_info(eth->dev, "chip id = %d\n", *chip_id);

	return 0;
}

static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
{
	switch (eth->chip_id) {
	case MT7623_ETH:
		return true;
	}

	return false;
}
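
/* Probe order: map resources, look up the syscon regmaps, IRQs and
 * clocks, initialise the hardware, create one netdev per
 * "mediatek,eth-mac" child node, then register the shared NAPI
 * instances on a dummy netdev.
 */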

static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;
	int i;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;
	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->irq_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			return -ENODEV;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	err = mtk_get_chip_id(eth, &eth->chip_id);
	if (err)
		return err;

	eth->hwlro = mtk_is_hwlro_supported(eth);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_deinit_hw;
	}

	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = mtk_mdio_init(eth);
	if (err)
		goto err_free_dev;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}
		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth" },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");