// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
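/* The DMA engine transfers data in bursts of XRX200_DMA_BURST_LEN 32-bit
 * words (32 bytes). RX buffer sizes are rounded up to this granularity by
 * xrx200_buffer_size() and TX start addresses are aligned down to it in
 * xrx200_start_xmit().
 */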
#define XRX200_DMA_BURST_LEN	8

#define XRX200_DMA_PACKET_COMPLETE	0
#define XRX200_DMA_PACKET_IN_PROGRESS	1

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)

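/* One xrx200_chan exists per DMA channel. The skb[] and rx_buff[] arrays
 * overlay each other in a union: the TX channel tracks the skb queued at
 * each descriptor, while the RX channel tracks the page fragments that
 * build_skb() later turns into skbs.
 */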
struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;

	union {
		struct sk_buff *skb[LTQ_DESC_NUM];
		void *rx_buff[LTQ_DESC_NUM];
	};

	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	u16 rx_buf_size;
	u16 rx_skb_size;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

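/* Buffer sizing: the largest frame for a given MTU additionally carries an
 * Ethernet header and one VLAN tag (VLAN_ETH_HLEN). The DMA buffer is that
 * length rounded up to the 32-byte burst granularity, and the backing
 * allocation must also hold the build_skb() overhead: NET_SKB_PAD headroom,
 * NET_IP_ALIGN and the trailing struct skb_shared_info.
 */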
static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

static int xrx200_skb_size(u16 buf_size)
{
	return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

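/* Worked example for the default MTU of 1500: xrx200_max_frame_len() gives
 * 1500 + 18 = 1518 bytes, which xrx200_buffer_size() rounds up to 1536.
 * What xrx200_skb_size() adds on top depends on the architecture, e.g.
 * 1536 + 64 (NET_SKB_PAD) + 2 (NET_IP_ALIGN) aligned to the cache line
 * size, plus the aligned size of struct skb_shared_info.
 */
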
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate the receiving of frames
	 * on the ports and then some packets queue up in the PPE buffers.
	 * They already passed the PMAC so they do not have the tags
	 * configured here. Read these packets here and drop them.
	 * The HW should have written them into memory after 10us.
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

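/* Replace the buffer at the current RX descriptor and hand the descriptor
 * back to the hardware. If the allocation or the DMA mapping fails, the old
 * buffer is kept in place so the ring never ends up with a hole; the caller
 * sees -ENOMEM and drops the frame instead.
 */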
static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
	void *buf = ch->rx_buff[ch->dma.desc];
	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

	ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
	if (!ch->rx_buff[ch->dma.desc]) {
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		skb_free_frag(ch->rx_buff[ch->dma.desc]);
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
	/* Make sure the address is written before we give it to HW */
	wmb();
skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
}

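/* Receive one descriptor's worth of data. Frames larger than one buffer
 * arrive scattered over several descriptors: the first one (LTQ_DMA_SOP)
 * starts a new skb, the following ones are chained via skb_shinfo()'s
 * frag_list, and the last one (LTQ_DMA_EOP) pushes the assembled skb up
 * the stack.
 */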
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	void *buf = ch->rx_buff[ch->dma.desc];
	u32 ctl = desc->ctl;
	int len = (ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	struct sk_buff *skb;
	int ret;

	ret = xrx200_alloc_buf(ch, napi_alloc_frag);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb = build_skb(buf, priv->rx_skb_size);
	if (!skb) {
		/* the ring was already refilled, only the frame is lost */
		skb_free_frag(buf);
		net_dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);

	/* add buffers to skb via skb->frag_list */
	if (ctl & LTQ_DMA_SOP) {
		ch->skb_head = skb;
		ch->skb_tail = skb;
		skb_reserve(skb, NET_IP_ALIGN);
	} else if (ch->skb_head) {
		if (ch->skb_head == ch->skb_tail)
			skb_shinfo(ch->skb_tail)->frag_list = skb;
		else
			ch->skb_tail->next = skb;
		ch->skb_tail = skb;
		ch->skb_head->len += skb->len;
		ch->skb_head->data_len += skb->len;
		ch->skb_head->truesize += skb->truesize;
	}

	if (ctl & LTQ_DMA_EOP) {
		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += ch->skb_head->len;
		netif_receive_skb(ch->skb_head);
		ch->skb_head = NULL;
		ch->skb_tail = NULL;
		ret = XRX200_DMA_PACKET_COMPLETE;
	} else {
		ret = XRX200_DMA_PACKET_IN_PROGRESS;
	}

	return ret;
}

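/* NAPI RX poll: consume completed descriptors until the budget is exhausted
 * or the ring runs empty. The channel interrupt stays masked while polling
 * and is re-enabled only once napi_complete_done() accepts the completion.
 */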
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
				continue;
			if (ret != XRX200_DMA_PACKET_COMPLETE)
				break;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}

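/* NAPI TX completion poll: reclaim transmitted skbs under netif_tx_lock(),
 * report them to the stack's byte queue limits via netdev_completed_queue()
 * and wake the queue if xrx200_start_xmit() had stopped it.
 */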
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

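/* Queue one frame for transmission. The hardware cannot start a transfer
 * at an arbitrary address, so the descriptor points at the burst-aligned
 * address just below skb->data and the remainder is encoded as the TX
 * offset. Each frame occupies exactly one descriptor (SOP and EOP set).
 */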
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* DMA needs to start on a burst-length-aligned address */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

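/* Growing the MTU can make the current RX buffers too small, so the RX ring
 * is drained and refilled with larger buffers; shrinking it keeps the
 * existing buffers. If one of the new allocations fails, the old MTU and
 * buffer sizes are restored.
 */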
static int
xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	void *buff;
	int curr_desc;
	int ret = 0;

	net_dev->mtu = new_mtu;
	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		buff = ch_rx->rx_buff[ch_rx->dma.desc];
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret) {
			net_dev->mtu = old_mtu;
			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
			break;
		}
		skb_free_frag(buff);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open		= xrx200_open,
	.ndo_stop		= xrx200_close,
	.ndo_start_xmit		= xrx200_start_xmit,
	.ndo_change_mtu		= xrx200_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

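/* Both DMA channels share this handler: mask the channel's interrupt, let
 * NAPI do the actual work and acknowledge the interrupt at the DMA core.
 */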
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}

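/* Set up the RX and TX DMA channels: configure the DMA port, allocate the
 * descriptor rings, pre-fill the RX ring with buffers and request one
 * interrupt per channel. The error paths unwind in reverse order.
 */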
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			skb_free_frag(priv->chan_rx.rx_buff[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		skb_free_frag(priv->chan_rx.rx_buff[i]);
}

static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
			  xrx200_tx_housekeeping);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

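/* A matching device tree node looks roughly like the sketch below. The
 * compatible string and the "rx"/"tx" interrupt names are what this driver
 * requires; the unit address, register range and interrupt numbers are
 * placeholders, not values taken from a real board file:
 *
 *	ethernet@e10b308 {
 *		compatible = "lantiq,xrx200-net";
 *		reg = <0xe10b308 0xcf8>;
 *		interrupts = <73>, <72>;
 *		interrupt-names = "rx", "tx";
 *	};
 */
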
static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");