// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"
/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);
/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);
/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}
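/* A note on desc_get_phys_addr(): the MSB half is folded in with two
 * 16-bit shifts rather than a single "<< 32" so that the expression
 * stays well defined when dma_addr_t is only 32 bits wide (a 32-bit
 * shift of a 32-bit type is undefined behaviour in C); in that
 * configuration the MSB contribution simply evaluates to zero.
 */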
/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop API is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}
/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp:		Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}
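/* Worked example (illustrative): with the default 125 MHz clock, one
 * timeout interval is 125 / 125000000 s = 1 usec, so a requested delay
 * of 50 usec maps to a timer value of 50. The hardware delay field is
 * 8 bits wide, hence the saturation at 255 above.
 */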
/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
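/* The rings built above are circular: descriptor i's "next" pointer is
 * the bus address of descriptor (i + 1) % ring_size, so the last BD
 * links back to the first and the DMA engine walks the ring without
 * any further pointer maintenance from the driver.
 */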
/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		     (ndev->dev_addr[5] << 8))));
}
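/* Layout example (illustrative): for the MAC address 00:0a:35:01:02:03,
 * UAW0 receives bytes 0-3 in little-endian order, i.e. 0x01350a00, and
 * the low 16 bits of UAW1 receive bytes 4-5, i.e. 0x0302; the upper
 * UAW1 bits are preserved by the read-modify-write above.
 */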
/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}
/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}
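/* Note on the FMI accesses above: the low byte of the filter mask index
 * register selects which of the four CAM entries the subsequent AF0/AF1
 * writes address, which is why the entry index i is OR'ed into the
 * register before each address-filter update.
 */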
/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}
static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}
/**
 * axienet_dma_stop - Stop DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}
/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}
/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp:		Pointer to the axienet_local structure
 * @first_bd:	Index of first descriptor to clean up
 * @nr_bds:	Max number of descriptors to clean up
 * @force:	Whether to clean descriptors even if not complete
 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 *		in all cleaned-up descriptors. Ignored if NULL.
 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 *
 * Called either after a successful transmit operation, or after there was an
 * error when setting up the chain.
 * Returns the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			napi_consume_skb(cur_p->skb, budget);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}
/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}
/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data:	Pointer to the axienet_local structure.
 * @result:	error reporting through dmaengine_result.
 * This function is called by dmaengine driver for TX channel to notify
 * that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2 * MAX_SKB_FRAGS);
}
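/* CIRC_SPACE() example (illustrative): with head = 100, tail = 40 and a
 * ring of TX_BD_NUM_MAX (4096) slots, the free space is
 *   (40 - 100 - 1) & 4095 = 4035
 * slots; the queue is only woken once at least 2 * MAX_SKB_FRAGS slots
 * are free again.
 */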
/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success or any non space errors.
 *	   NETDEV_TX_BUSY when no free element is available in the TX skb
 *	   ring buffer.
 *
 * This function is invoked to initiate transmission. The function sets up
 * the skbs, registers the dma callback API and submits the dma transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
			sg_len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);

	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
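/* app_metadata layout used above (illustrative summary): word 0 carries
 * the checksum-offload mode (1 = partial, 2 = full), and for partial
 * offload word 1 packs the checksum start offset in its upper 16 bits
 * and the checksum insert offset in its lower 16 bits. These words
 * travel to the MAC as AXI-Stream control data via the dmaengine
 * metadata API.
 */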
/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);

	if (packets) {
		lp->tx_bd_ci += packets;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;

		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}
/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}
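/* BD usage example (illustrative): an skb with a linear head and two
 * page fragments consumes three descriptors:
 *   BD[n]   head  -> cntrl = headlen | TXSOF
 *   BD[n+1] frag0 -> cntrl = frag0 size
 *   BD[n+2] frag1 -> cntrl = frag1 size | TXEOF
 * Only the last BD of a packet carries the skb pointer, so the
 * completion path frees the skb exactly once.
 */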
/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data:	Pointer to the skbuf_dma_descriptor structure.
 * @result:	error reporting through dmaengine_result.
 * This function is called by dmaengine driver for RX channel to notify
 * that the packet is received.
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);
	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);
	axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}
/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}
/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_tx);
	}

	return IRQ_HANDLED;
}
/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_rx);
	}

	return IRQ_HANDLED;
}
/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);
/**
 * axienet_rx_submit_desc - Submit an rx descriptor to dmaengine.
 * Allocate an skbuff, map it for DMA, obtain a dma descriptor,
 * attach the callback information and submit the descriptor.
 *
 * @ndev:	net_device pointer
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	lp->rx_ring_head++;
	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}
/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the dmaengine initialization code.
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}
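/* Sizing note: axienet_get_rx_desc()/axienet_get_tx_desc() mask the ring
 * index with (RX_BUF_NUM_DEFAULT - 1) and (TX_BD_NUM_MAX - 1)
 * respectively, so both ring sizes must remain powers of two for the
 * wrap-around arithmetic above to stay correct.
 */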
/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the dma initialization code. It also installs the interrupt
 * service routines and enables the interrupt lines and ISR handling.
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}
/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "%s\n", __func__);

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	if (lp->use_dmaengine) {
		/* Enable interrupts for Axi Ethernet core (if defined) */
		if (lp->eth_irq > 0) {
			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
					  ndev->name, ndev);
			if (ret)
				goto err_phy;
		}

		ret = axienet_init_dmaengine(ndev);
		if (ret < 0)
			goto err_free_eth_irq;
	} else {
		ret = axienet_init_legacy_dma(ndev);
		if (ret)
			goto err_phy;
	}

	return 0;

err_free_eth_irq:
	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
err_phy:
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	return ret;
}
/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	dev_dbg(&ndev->dev, "axienet_close()\n");

	if (!lp->use_dmaengine) {
		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}
/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success; -EBUSY if the interface is running, or -EINVAL if
 * the new MTU would not fit in the receive memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}
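/* Worked example (illustrative, assuming the usual VLAN_ETH_HLEN of 18
 * bytes and a 4-byte XAE_TRL_SIZE trailer): accepting an MTU of 9000
 * requires lp->rxmem >= 9000 + 18 + 4 = 9022 bytes of receive buffering.
 */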
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}
static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}
static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

static const struct net_device_ops axienet_netdev_dmaengine_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit_dmaengine,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
};
/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}
/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}
/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	if (!lp->use_dmaengine) {
		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
	}
}
static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		NL_SET_ERR_MSG(extack,
			       "Please stop netif before applying configuration");
		return -EBUSY;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->rx_coalesce_usecs)
		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	if (ecoalesce->tx_coalesce_usecs)
		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
}
static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}
static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS,
	.get_drvinfo    = axienet_ethtools_get_drvinfo,
	.get_regs_len   = axienet_ethtools_get_regs_len,
	.get_regs       = axienet_ethtools_get_regs,
	.get_link       = ethtool_op_get_link,
	.get_ringparam	= axienet_ethtools_get_ringparam,
	.set_ringparam	= axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce   = axienet_ethtools_get_coalesce,
	.set_coalesce   = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset	= axienet_ethtools_nway_reset,
};

static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
        return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
                                  struct phylink_link_state *state)
{
        struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

        phylink_mii_c22_pcs_get_state(pcs_phy, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
        struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

        phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
                              phy_interface_t interface,
                              const unsigned long *advertising,
                              bool permit_pause_to_mac)
{
        struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
        struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
        struct axienet_local *lp = netdev_priv(ndev);
        int ret;
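
        /* When the core was built with the SGMII/1000BASE-X switching
         * option, select the requested standard through the
         * vendor-specific MII register before reprogramming the PCS.
         */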
        if (lp->switch_x_sgmii) {
                ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
                                    interface == PHY_INTERFACE_MODE_SGMII ?
                                        XLNX_MII_STD_SELECT_SGMII : 0);
                if (ret < 0) {
                        netdev_warn(ndev,
                                    "Failed to switch PHY interface: %d\n",
                                    ret);
                        return ret;
                }
        }

        ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
                                         neg_mode);
        if (ret < 0)
                netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

        return ret;
}

static const struct phylink_pcs_ops axienet_pcs_ops = {
        .pcs_get_state = axienet_pcs_get_state,
        .pcs_config = axienet_pcs_config,
        .pcs_an_restart = axienet_pcs_an_restart,
};

static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
                                                  phy_interface_t interface)
{
        struct net_device *ndev = to_net_dev(config->dev);
        struct axienet_local *lp = netdev_priv(ndev);

        if (interface == PHY_INTERFACE_MODE_1000BASEX ||
            interface == PHY_INTERFACE_MODE_SGMII)
                return &lp->pcs;

        return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
                               const struct phylink_link_state *state)
{
        /* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
                                  unsigned int mode,
                                  phy_interface_t interface)
{
        /* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
                                struct phy_device *phy,
                                unsigned int mode, phy_interface_t interface,
                                int speed, int duplex,
                                bool tx_pause, bool rx_pause)
{
        struct net_device *ndev = to_net_dev(config->dev);
        struct axienet_local *lp = netdev_priv(ndev);
        u32 emmc_reg, fcc_reg;
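
        /* Mirror the link parameters resolved by phylink into the MAC:
         * speed via the EMMC register, pause settings via the FCC register.
         */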
        emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
        emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

        switch (speed) {
        case SPEED_1000:
                emmc_reg |= XAE_EMMC_LINKSPD_1000;
                break;
        case SPEED_100:
                emmc_reg |= XAE_EMMC_LINKSPD_100;
                break;
        case SPEED_10:
                emmc_reg |= XAE_EMMC_LINKSPD_10;
                break;
        default:
                dev_err(&ndev->dev,
                        "Speed other than 10, 100 or 1Gbps is not supported\n");
                break;
        }

        axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

        fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
        if (tx_pause)
                fcc_reg |= XAE_FCC_FCTX_MASK;
        else
                fcc_reg &= ~XAE_FCC_FCTX_MASK;
        if (rx_pause)
                fcc_reg |= XAE_FCC_FCRX_MASK;
        else
                fcc_reg &= ~XAE_FCC_FCRX_MASK;
        axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
        .mac_select_pcs = axienet_mac_select_pcs,
        .mac_config = axienet_mac_config,
        .mac_link_down = axienet_mac_link_down,
        .mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work:	pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * BD rings.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
        u32 i;
        u32 axienet_status;
        struct axidma_bd *cur_p;
        struct axienet_local *lp = container_of(work, struct axienet_local,
                                                dma_err_task);
        struct net_device *ndev = lp->ndev;

        napi_disable(&lp->napi_tx);
        napi_disable(&lp->napi_rx);

        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

        axienet_dma_stop(lp);
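
        /* With the DMA halted, drop any in-flight skbs and return both
         * descriptor rings to their pristine, post-reset state.
         */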
        for (i = 0; i < lp->tx_bd_num; i++) {
                cur_p = &lp->tx_bd_v[i];
                if (cur_p->cntrl) {
                        dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

                        dma_unmap_single(lp->dev, addr,
                                         (cur_p->cntrl &
                                          XAXIDMA_BD_CTRL_LENGTH_MASK),
                                         DMA_TO_DEVICE);
                }
                if (cur_p->skb)
                        dev_kfree_skb_irq(cur_p->skb);
                cur_p->phys = 0;
                cur_p->phys_msb = 0;
                cur_p->cntrl = 0;
                cur_p->status = 0;
                cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
                cur_p->app4 = 0;
                cur_p->skb = NULL;
        }

        for (i = 0; i < lp->rx_bd_num; i++) {
                cur_p = &lp->rx_bd_v[i];
                cur_p->status = 0;
                cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app3 = 0;
                cur_p->app4 = 0;
        }

        lp->tx_bd_ci = 0;
        lp->tx_bd_tail = 0;
        lp->rx_bd_ci = 0;

        axienet_dma_start(lp);

        axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
        axienet_status &= ~XAE_RCW1_RX_MASK;
        axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

        axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
        if (axienet_status & XAE_INT_RXRJECT_MASK)
                axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
        axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
                    XAE_INT_RECV_ERROR_MASK : 0);
        axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

        /* Sync default options with HW but leave receiver and
         * transmitter disabled.
         */
        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
        axienet_set_mac_address(ndev, NULL);
        axienet_set_multicast_list(ndev);
        axienet_setoptions(ndev, lp->options);
        napi_enable(&lp->napi_rx);
        napi_enable(&lp->napi_tx);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for the Axi Ethernet driver. It is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device, parses the device tree to populate the fields of axienet_local, and
 * registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
        int ret;
        struct device_node *np;
        struct axienet_local *lp;
        struct net_device *ndev;
        struct resource *ethres;
        u8 mac_addr[ETH_ALEN];
        int addr_width = 32;
        u32 value;
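
        /* addr_width stays at 32 unless the 64-bit MSB descriptor
         * registers are detected as writable further down.
         */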

        ndev = alloc_etherdev(sizeof(*lp));
        if (!ndev)
                return -ENOMEM;

        platform_set_drvdata(pdev, ndev);

        SET_NETDEV_DEV(ndev, &pdev->dev);
        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
        ndev->features = NETIF_F_SG;
        ndev->ethtool_ops = &axienet_ethtool_ops;

        /* MTU range: 64 - 9000 */
        ndev->min_mtu = 64;
        ndev->max_mtu = XAE_JUMBO_MTU;

        lp = netdev_priv(ndev);
        lp->ndev = ndev;
        lp->dev = &pdev->dev;
        lp->options = XAE_OPTION_DEFAULTS;
        lp->rx_bd_num = RX_BD_NUM_DEFAULT;
        lp->tx_bd_num = TX_BD_NUM_DEFAULT;

        u64_stats_init(&lp->rx_stat_sync);
        u64_stats_init(&lp->tx_stat_sync);

        lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
        if (!lp->axi_clk) {
                /* For backward compatibility, if named AXI clock is not present,
                 * treat the first clock specified as the AXI clock.
                 */
                lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
        }
        if (IS_ERR(lp->axi_clk)) {
                ret = PTR_ERR(lp->axi_clk);
                goto free_netdev;
        }
        ret = clk_prepare_enable(lp->axi_clk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
                goto free_netdev;
        }

        lp->misc_clks[0].id = "axis_clk";
        lp->misc_clks[1].id = "ref_clk";
        lp->misc_clks[2].id = "mgt_clk";
        ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS,
                                         lp->misc_clks);
        if (ret)
                goto cleanup_clk;

        ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        if (ret)
                goto cleanup_clk;

        /* Map device registers */
        lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
        if (IS_ERR(lp->regs)) {
                ret = PTR_ERR(lp->regs);
                goto cleanup_clk;
        }
        lp->regs_start = ethres->start;

        /* Setup checksum offload, but default to off if not specified */
        lp->features = 0;

        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
        if (!ret) {
                switch (value) {
                case 1:
                        lp->csum_offload_on_tx_path =
                                XAE_FEATURE_PARTIAL_TX_CSUM;
                        lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
                        /* Can checksum TCP/UDP over IPv4. */
                        ndev->features |= NETIF_F_IP_CSUM;
                        break;
                case 2:
                        lp->csum_offload_on_tx_path =
                                XAE_FEATURE_FULL_TX_CSUM;
                        lp->features |= XAE_FEATURE_FULL_TX_CSUM;
                        /* Can checksum TCP/UDP over IPv4. */
                        ndev->features |= NETIF_F_IP_CSUM;
                        break;
                default:
                        lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
                }
        }
        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
        if (!ret) {
                switch (value) {
                case 1:
                        lp->csum_offload_on_rx_path =
                                XAE_FEATURE_PARTIAL_RX_CSUM;
                        lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
                        break;
                case 2:
                        lp->csum_offload_on_rx_path =
                                XAE_FEATURE_FULL_RX_CSUM;
                        lp->features |= XAE_FEATURE_FULL_RX_CSUM;
                        break;
                default:
                        lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
                }
        }

        /* For supporting jumbo frames, the Axi Ethernet hardware must have
         * a larger Rx/Tx Memory. Typically, the size must be large so that
         * we can enable jumbo option and start supporting jumbo frames.
         * Here we check for memory allocated for Rx/Tx in the hardware from
         * the device-tree and accordingly set flags.
         */
        of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
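
        /* Illustrative device-tree snippet for the properties parsed above
         * (the values here are made up, not taken from a real design):
         *
         *	ethernet@40c00000 {
         *		compatible = "xlnx,axi-ethernet-1.00.a";
         *		xlnx,rxmem = <0x8000>;
         *		xlnx,txcsum = <2>;
         *		xlnx,rxcsum = <2>;
         *	};
         */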

        lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
                                                   "xlnx,switch-x-sgmii");

        /* Start with the proprietary, and broken phy_type */
        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
        if (!ret) {
                netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
                switch (value) {
                case XAE_PHY_TYPE_MII:
                        lp->phy_mode = PHY_INTERFACE_MODE_MII;
                        break;
                case XAE_PHY_TYPE_GMII:
                        lp->phy_mode = PHY_INTERFACE_MODE_GMII;
                        break;
                case XAE_PHY_TYPE_RGMII_2_0:
                        lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
                        break;
                case XAE_PHY_TYPE_SGMII:
                        lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
                        break;
                case XAE_PHY_TYPE_1000BASE_X:
                        lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
                        break;
                default:
                        ret = -EINVAL;
                        goto cleanup_clk;
                }
        } else {
                ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
                if (ret)
                        goto cleanup_clk;
        }
        if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
            lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
                dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
                ret = -EINVAL;
                goto cleanup_clk;
        }

        if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
                /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
                np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);

                if (np) {
                        struct resource dmares;

                        ret = of_address_to_resource(np, 0, &dmares);
                        if (ret) {
                                dev_err(&pdev->dev,
                                        "unable to get DMA resource\n");
                                of_node_put(np);
                                goto cleanup_clk;
                        }
                        lp->dma_regs = devm_ioremap_resource(&pdev->dev,
                                                             &dmares);
                        lp->rx_irq = irq_of_parse_and_map(np, 1);
                        lp->tx_irq = irq_of_parse_and_map(np, 0);
                        of_node_put(np);
                        lp->eth_irq = platform_get_irq_optional(pdev, 0);
                } else {
                        /* Check for these resources directly on the Ethernet node. */
                        lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
                        lp->rx_irq = platform_get_irq(pdev, 1);
                        lp->tx_irq = platform_get_irq(pdev, 0);
                        lp->eth_irq = platform_get_irq_optional(pdev, 2);
                }
                if (IS_ERR(lp->dma_regs)) {
                        dev_err(&pdev->dev, "could not map DMA regs\n");
                        ret = PTR_ERR(lp->dma_regs);
                        goto cleanup_clk;
                }
                if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
                        dev_err(&pdev->dev, "could not determine irqs\n");
                        ret = -ENOMEM;
                        goto cleanup_clk;
                }

                /* Reset core now that clocks are enabled, prior to accessing MDIO */
                ret = __axienet_device_reset(lp);
                if (ret)
                        goto cleanup_clk;

                /* Autodetect the need for 64-bit DMA pointers.
                 * When the IP is configured for a bus width bigger than 32 bits,
                 * writing the MSB registers is mandatory, even if they are all 0.
                 * We can detect this case by writing all 1's to one such register
                 * and see if that sticks: when the IP is configured for 32 bits
                 * only, those registers are RES0.
                 * Those MSB registers were introduced in IP v7.1, which we check first.
                 */
                if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
                        void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

                        iowrite32(0x0, desc);
                        if (ioread32(desc) == 0) {      /* sanity check */
                                iowrite32(0xffffffff, desc);
                                if (ioread32(desc) > 0) {
                                        lp->features |= XAE_FEATURE_DMA_64BIT;
                                        addr_width = 64;
                                        dev_info(&pdev->dev,
                                                 "autodetected 64-bit DMA range\n");
                                }
                                iowrite32(0x0, desc);
                        }
                }
                if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
                        dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
                        ret = -EINVAL;
                        goto cleanup_clk;
                }

                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
                if (ret) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
                        goto cleanup_clk;
                }
                netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
                netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
        } else {
                struct xilinx_vdma_config cfg;
                struct dma_chan *tx_chan;
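
                /* Only the TX channel is requested here, as a probe-time
                 * check that the DMA engine is present and supports a
                 * channel reset; the channels used for traffic are
                 * requested when the interface is opened.
                 */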
                lp->eth_irq = platform_get_irq_optional(pdev, 0);
                if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
                        ret = lp->eth_irq;
                        goto cleanup_clk;
                }
                tx_chan = dma_request_chan(lp->dev, "tx_chan0");
                if (IS_ERR(tx_chan)) {
                        ret = PTR_ERR(tx_chan);
                        dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
                        goto cleanup_clk;
                }

                cfg.reset = 1;
                /* As name says VDMA but it has support for DMA channel reset */
                ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
                if (ret < 0) {
                        dev_err(&pdev->dev, "Reset channel failed\n");
                        dma_release_channel(tx_chan);
                        goto cleanup_clk;
                }

                dma_release_channel(tx_chan);
                lp->use_dmaengine = 1;
        }

        if (lp->use_dmaengine)
                ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
        else
                ndev->netdev_ops = &axienet_netdev_ops;
        /* Check for Ethernet core IRQ (optional) */
        if (lp->eth_irq <= 0)
                dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

        /* Retrieve the MAC address */
        ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
        if (!ret) {
                axienet_set_mac_address(ndev, mac_addr);
        } else {
                dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
                         ret);
                axienet_set_mac_address(ndev, NULL);
        }

        lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
        lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
        lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
        lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
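
        /* These defaults only seed the software state; they can be changed
         * later with "ethtool -C", but only while the interface is down
         * (see axienet_ethtools_set_coalesce()).
         */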

        ret = axienet_mdio_setup(lp);
        if (ret)
                dev_warn(&pdev->dev,
                         "error registering MDIO bus: %d\n", ret);

        if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
            lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
                np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
                if (!np) {
                        /* Deprecated: Always use "pcs-handle" for pcs_phy.
                         * Falling back to "phy-handle" here is only for
                         * backward compatibility with old device trees.
                         */
                        np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
                }
                if (!np) {
                        dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
                        ret = -EINVAL;
                        goto cleanup_mdio;
                }
                lp->pcs_phy = of_mdio_find_device(np);
                if (!lp->pcs_phy) {
                        ret = -EPROBE_DEFER;
                        of_node_put(np);
                        goto cleanup_mdio;
                }
                of_node_put(np);
                lp->pcs.ops = &axienet_pcs_ops;
                lp->pcs.neg_mode = true;
                lp->pcs.poll = true;
        }

        lp->phylink_config.dev = &ndev->dev;
        lp->phylink_config.type = PHYLINK_NETDEV;
        lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                                              MAC_10FD | MAC_100FD | MAC_1000FD;
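
        /* Note that the capabilities above deliberately omit the half-duplex
         * modes; this driver only supports full-duplex operation.
         */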

        __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
        if (lp->switch_x_sgmii) {
                __set_bit(PHY_INTERFACE_MODE_1000BASEX,
                          lp->phylink_config.supported_interfaces);
                __set_bit(PHY_INTERFACE_MODE_SGMII,
                          lp->phylink_config.supported_interfaces);
        }

        lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
                                     lp->phy_mode,
                                     &axienet_phylink_ops);
        if (IS_ERR(lp->phylink)) {
                ret = PTR_ERR(lp->phylink);
                dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
                goto cleanup_mdio;
        }

        ret = register_netdev(lp->ndev);
        if (ret) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
                goto cleanup_phylink;
        }

        return 0;

cleanup_phylink:
        phylink_destroy(lp->phylink);

cleanup_mdio:
        if (lp->pcs_phy)
                put_device(&lp->pcs_phy->dev);
        axienet_mdio_teardown(lp);
cleanup_clk:
        clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        clk_disable_unprepare(lp->axi_clk);

free_netdev:
        free_netdev(ndev);

        return ret;
}

static void axienet_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct axienet_local *lp = netdev_priv(ndev);

        unregister_netdev(ndev);

        if (lp->phylink)
                phylink_destroy(lp->phylink);

        if (lp->pcs_phy)
                put_device(&lp->pcs_phy->dev);

        axienet_mdio_teardown(lp);

        clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        clk_disable_unprepare(lp->axi_clk);

        free_netdev(ndev);
}

static void axienet_shutdown(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);

        rtnl_lock();
        netif_device_detach(ndev);

        if (netif_running(ndev))
                dev_close(ndev);

        rtnl_unlock();
}

static int axienet_suspend(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);

        if (!netif_running(ndev))
                return 0;
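
        /* Detach first so the core stops using the device, then close it
         * under RTNL, since the stop path must run with RTNL held.
         */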
        netif_device_detach(ndev);

        rtnl_lock();
        axienet_stop(ndev);
        rtnl_unlock();

        return 0;
}

static int axienet_resume(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);

        if (!netif_running(ndev))
                return 0;

        rtnl_lock();
        axienet_open(ndev);
        rtnl_unlock();

        netif_device_attach(ndev);

        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
                                axienet_suspend, axienet_resume);

static struct platform_driver axienet_driver = {
        .probe = axienet_probe,
        .remove_new = axienet_remove,
        .shutdown = axienet_shutdown,
        .driver = {
                .name = "xilinx_axienet",
                .pm = &axienet_pm_ops,
                .of_match_table = axienet_of_match,
        },
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");