// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16

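/* Both counters live in the same channel control register: the coalesce
 * count occupies bits 23:16 and the delay timeout bits 31:24. For example,
 * a coalesce count of 24 is programmed as (24 << XAXIDMA_COALESCE_SHIFT),
 * i.e. 0x00180000, after the old field is cleared with ~XAXIDMA_COALESCE_MASK.
 */
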
#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

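/* Roughly: the threshold is the number of completed frames after which the
 * engine raises a completion (IOC) interrupt, while the waitbound arms the
 * delay timer so a partially accumulated batch still raises a delay
 * interrupt after a bounded wait.
 */
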
#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */

#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

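/* The MDIO op word packs clause, opcode, port and device/register fields
 * into a single register write. For example, a Clause 45 read of MMD 1 on
 * PHY address 4 is encoded as NIXGE_MDIO_CLAUSE45 |
 * NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) | NIXGE_MDIO_ADDR(4) | NIXGE_MDIO_MMD(1).
 */
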
#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */

#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

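/* Hardware descriptor, laid out to match the Xilinx AXI DMA buffer
 * descriptor format. sw_id_offset is not interpreted by hardware; the
 * driver uses it to stash the skb pointer that owns the buffer.
 */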
struct nixge_hw_dma_bd {
	u32 next;
	u32 reserved1;
	u32 phys;
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus;	/* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

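/* Typical use, polling a self-clearing bit with 10 us sleeps and a 1 ms
 * timeout (this mirrors the call in __nixge_device_reset() below):
 *
 *	err = nixge_dma_poll_timeout(priv, offset, status,
 *				     !(status & XAXIDMA_CR_RESET_MASK),
 *				     10, 1000);
 */
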
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (priv->rx_bd_v[i].sw_id_offset));
	}

	if (priv->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					    &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kcalloc(ndev->dev.parent,
				    TX_BD_NUM, sizeof(*priv->tx_skb),
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					    &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		priv->tx_bd_v[i].next = priv->tx_bd_p +
					sizeof(*priv->tx_bd_v) *
					((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		priv->rx_bd_v[i].next = priv->rx_bd_p +
					sizeof(*priv->rx_bd_v) *
					((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		priv->rx_bd_v[i].sw_id_offset = (u32)skb;
		priv->rx_bd_v[i].phys =
			dma_map_single(ndev->dev.parent,
				       skb->data,
				       NIXGE_MAX_JUMBO_FRAME_SIZE,
				       DMA_FROM_DEVICE);
		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
			    (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

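/* The ring is full when the descriptor num_frag slots ahead of the current
 * tail is still owned by hardware, i.e. its status bits have not yet been
 * cleared by nixge_start_xmit_done().
 */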
static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;

	return 0;
}

static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
		goto drop;

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_p->phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
			goto frag_err;

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_p->phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;

frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	dma_unmap_single(priv->ndev->dev.parent,
			 priv->tx_bd_v[priv->tx_bd_tail].phys,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

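/* Receive path: walk the completed descriptors, hand each skb up via
 * NAPI/GRO and immediately re-arm the slot with a freshly mapped buffer
 * before advancing the ring index.
 */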
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
		budget > packets)) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(cur_p->sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     NIXGE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32)new_skb;

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more, reschedule, but clear */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* if not, turn on RX IRQs again ... */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}

	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}

	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->tx_bd_v[priv->tx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off IRQs because NAPI takes over from here */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}

	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}

	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->rx_bd_v[priv->rx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

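/* Error recovery, scheduled from the IRQ handlers above: runs in tasklet
 * context, resets both DMA channels, discards any in-flight buffers and
 * re-arms the rings much like nixge_hw_dma_bd_init() does.
 */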
static void nixge_dma_err_handler(unsigned long data)
{
	struct nixge_priv *lp = (struct nixge_priv *)data;
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			    (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
		     (unsigned long)priv);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	     NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

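/* The MAC is split across two registers: bytes 2..5 of dev_addr go into
 * NIXGE_REG_MAC_LSB (byte 2 in bits 31:24) and bytes 0..1 into
 * NIXGE_REG_MAC_MSB (byte 0 in bits 15:8).
 */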
static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2]) << 24 |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}

static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open = nixge_open,
	.ndo_stop = nixge_stop,
	.ndo_start_xmit = nixge_start_xmit,
	.ndo_change_mtu	= nixge_change_mtu,
	.ndo_set_mac_address = nixge_net_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};

static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}

static int nixge_ethtools_get_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

static int nixge_ethtools_set_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ecoalesce->rx_coalesce_usecs ||
	    ecoalesce->rx_coalesce_usecs_irq ||
	    ecoalesce->rx_max_coalesced_frames_irq ||
	    ecoalesce->tx_coalesce_usecs ||
	    ecoalesce->tx_coalesce_usecs_irq ||
	    ecoalesce->tx_max_coalesced_frames_irq ||
	    ecoalesce->stats_block_coalesce_usecs ||
	    ecoalesce->use_adaptive_rx_coalesce ||
	    ecoalesce->use_adaptive_tx_coalesce ||
	    ecoalesce->pkt_rate_low ||
	    ecoalesce->rx_coalesce_usecs_low ||
	    ecoalesce->rx_max_coalesced_frames_low ||
	    ecoalesce->tx_coalesce_usecs_low ||
	    ecoalesce->tx_max_coalesced_frames_low ||
	    ecoalesce->pkt_rate_high ||
	    ecoalesce->rx_coalesce_usecs_high ||
	    ecoalesce->rx_max_coalesced_frames_high ||
	    ecoalesce->tx_coalesce_usecs_high ||
	    ecoalesce->tx_max_coalesced_frames_high ||
	    ecoalesce->rate_sample_interval)
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Enable identification LED override */
		ctrl |= NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 0;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.get_drvinfo = nixge_ethtools_get_drvinfo,
	.get_coalesce = nixge_ethtools_get_coalesce,
	.set_coalesce = nixge_ethtools_set_coalesce,
	.set_phys_id = nixge_ethtools_set_phys_id,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_link = ethtool_op_get_link,
};

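/* MDIO accessors. Clause 45 transactions take two cycles: an address cycle
 * (NIXGE_MDIO_OP_ADDRESS) first latches the 16-bit register address, then a
 * separate read/write cycle moves the data. Clause 22 needs only the single
 * data cycle.
 */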
static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	}

	return err;
}

static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read;
	bus->write = nixge_mdio_write;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}

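/* Fetch the MAC address from an nvmem cell named "address", if the device
 * tree wires one up. nvmem_cell_read() returns a kmalloc'd buffer, so the
 * caller is expected to free it.
 */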
static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return NULL;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return mac;
}

static int nixge_probe(struct platform_device *pdev)
{
	struct nixge_priv *priv;
	struct net_device *ndev;
	struct resource *dmares;
	const u8 *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (mac_addr && is_valid_ether_addr(mac_addr)) {
		ether_addr_copy(ndev->dev_addr, mac_addr);
		kfree(mac_addr);
	} else {
		eth_hw_addr_random(ndev);
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);

	dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		return PTR_ERR(priv->dma_regs);
	}
	priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq");
		return priv->tx_irq;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq");
		return priv->rx_irq;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	err = nixge_mdio_setup(priv, pdev->dev.of_node);
	if (err) {
		netdev_err(ndev, "error registering mdio bus");
		goto free_netdev;
	}

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if ((int)priv->phy_mode < 0) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "could not find \"phy-handle\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto unregister_mdio;
	}

	return 0;

unregister_mdio:
	mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

static struct platform_driver nixge_driver = {
	.probe		= nixge_probe,
	.remove		= nixge_remove,
	.driver		= {
		.name		= "nixge",
		.of_match_table	= of_match_ptr(nixge_dt_ids),
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");