/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
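
/* Illustrative sketch (not part of the original driver): applying one entry
 * of the table above is a read-modify-write of the named register; the bits
 * in m_or are cleared and then re-set only if the option is requested.
 * axienet_setoptions() below performs exactly this walk over the whole table.
 */
static inline void axienet_option_rmw_sketch(struct axienet_local *lp,
					     const struct axienet_option *tp,
					     u32 options)
{
	u32 reg = axienet_ior(lp, tp->reg) & ~tp->m_or;

	if (options & tp->opt)
		reg |= tp->m_or;
	axienet_iow(lp, tp->reg, reg);
}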

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}
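
/* Illustrative sketch (not part of the original driver): the two accessors
 * above are typically used as a read-modify-write pair, e.g. to set the
 * run/stop bit of a DMA channel control register, as the init and reset
 * paths below do.
 */
static inline void axienet_dma_rmw_sketch(struct axienet_local *lp,
					  off_t cr_offset)
{
	u32 cr = axienet_dma_in32(lp, cr_offset);

	axienet_dma_out32(lp, cr_offset, cr | XAXIDMA_CR_RUNSTOP_MASK);
}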

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 lp->max_frm_size, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (lp->rx_bd_v[i].sw_id_offset));
	}

	if (lp->rx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
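
/* Illustrative sketch (not part of the original driver): the rings built in
 * axienet_dma_bd_init() are circular, so the DMA-visible address of the
 * descriptor after index @i wraps from RX_BD_NUM - 1 back to 0, exactly as
 * computed for the "next" fields and tail pointers above.
 */
static inline dma_addr_t axienet_rx_bd_next_sketch(struct axienet_local *lp,
						   int i)
{
	return lp->rx_bd_p + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
}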

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_random_addr(ndev->dev_addr);

	/* Set up unicast MAC address filter set its mac address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		     (ndev->dev_addr[5] << 8))));
}
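
/* Illustrative sketch (not part of the original driver): the little-endian
 * byte packing used for UAW0 above.  For MAC 00:0a:35:01:02:03 this returns
 * 0x01350a00; bytes 4 and 5 (0x0302) go into the low half-word of UAW1.
 */
static inline u32 axienet_uaw0_pack_sketch(const u8 *mac)
{
	return mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
}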

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	u32 reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}
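
/* Illustrative sketch (not part of the original driver): the reset paths
 * below disable the transmitter and receiver by masking TXEN/RXEN out of
 * the stored options, then re-enable them by applying the full set.
 */
static inline void axienet_strobe_txrx_sketch(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_setoptions(ndev, lp->options);
}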

static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
{
	u32 timeout;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
				   __func__);
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev,
					"Speed other than 10, 100 or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			netdev_err(ndev,
				   "Error setting Axi Ethernet mac speed\n");
		}
	}
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 *			     Axi DMA Tx channel.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++lp->tx_bd_ci;
		lp->tx_bd_ci %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	/* Matches barrier in axienet_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}
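
/* Illustrative sketch (not part of the original driver): a frame consumes
 * one BD for the linear header plus one per page fragment, which is why
 * axienet_start_xmit() below probes for "num_frag + 1" free descriptors.
 */
static inline int axienet_bds_needed_sketch(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags + 1;
}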

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in axienet_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (axienet_check_tx_bd_space(lp, num_frag + 1))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		++lp->tx_bd_tail;
		lp->tx_bd_tail %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++lp->tx_bd_tail;
	lp->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev:	Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		++lp->rx_bd_ci;
		lp->rx_bd_ci %= RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		if (lp->phy_type == XAE_PHY_TYPE_GMII) {
			phydev = of_phy_connect(lp->ndev, lp->phy_node,
						axienet_adjust_link, 0,
						PHY_INTERFACE_MODE_GMII);
		} else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
			phydev = of_phy_connect(lp->ndev, lp->phy_node,
						axienet_adjust_link, 0,
						PHY_INTERFACE_MODE_RGMII_ID);
		}

		if (!phydev)
			dev_err(lp->dev, "of_phy_connect() failed\n");
		else
			phy_start(phydev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success, or a negative error code on failure.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}
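
/* Illustrative sketch (not part of the original driver): the on-wire frame
 * size implied by an MTU, as checked against lp->rxmem above.  An MTU of
 * 9000 needs 9000 + VLAN_ETH_HLEN (18) + XAE_TRL_SIZE (4) = 9022 bytes.
 */
static inline int axienet_frame_size_sketch(int mtu)
{
	return mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE;
}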

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);

	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}
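
/* Illustrative sketch (not part of the original driver): the coalesce count
 * lives in a bit-field of the DMA control register, so reading it back is a
 * mask-and-shift, mirroring the two extractions above.
 */
static inline u32 axienet_coalesce_from_cr_sketch(u32 cr)
{
	return (cr & XAXIDMA_COALESCE_MASK) >> XAXIDMA_COALESCE_SHIFT;
}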

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data:	Data passed
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	u8 mac_addr[6];
	struct resource *ethres, dmares;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
		ret = PTR_ERR(lp->regs);
		goto free_netdev;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
	of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (!np) {
		dev_err(&pdev->dev, "could not find DMA node\n");
		ret = -ENODEV;
		goto free_netdev;
	}
	ret = of_address_to_resource(np, 0, &dmares);
	if (ret) {
		dev_err(&pdev->dev, "unable to get DMA resource\n");
		of_node_put(np);
		goto free_netdev;
	}
	lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		of_node_put(np);
		goto free_netdev;
	}
	lp->rx_irq = irq_of_parse_and_map(np, 1);
	lp->tx_irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto free_netdev;
	}

	/* Retrieve the MAC address */
	ret = of_property_read_u8_array(pdev->dev.of_node,
					"local-mac-address", mac_addr, 6);
	if (ret) {
		dev_err(&pdev->dev, "could not find MAC address\n");
		goto free_netdev;
	}
	axienet_set_mac_address(ndev, (void *)mac_addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (lp->phy_node) {
		ret = axienet_mdio_setup(lp, pdev->dev.of_node);
		if (ret)
			dev_warn(&pdev->dev, "error registering MDIO bus\n");
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto free_netdev;
	}

	return 0;

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	unregister_netdev(ndev);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.driver = {
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");