// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus. However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait. However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing. Lots and lots of testing.
 */
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>	/* needed for sizeof(tcphdr) */
#include <linux/udp.h>	/* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"
/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	iowrite32(value, lp->regs + offset);
}
static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
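
/* Worked example (illustrative, not from the original comment): a
 * single MDIO transaction is 64 MDC bit times (32-bit preamble plus a
 * 32-bit frame), so at the slowest covered MDC clock of 3200 Hz it
 * takes 64 / 3200 Hz = 20 ms, which is exactly the polling budget
 * defined above.
 */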

/*
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;

	return 0;
}
/*
 * temac_indirect_in32 - Indirect register read access. This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}
/*
 * temac_indirect_in32_locked - Indirect register read access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete. We really
	 * should not see timeouts, and could even end up causing
	 * problem for following indirect access, so let's make a bit
	 * of noise if we see one.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}
/*
 * temac_indirect_out32 - Indirect register write access. This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
/*
 * temac_indirect_out32_locked - Indirect register write access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here. And if it happens, we actually end up silently
	 * ignoring the write request. Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here. And if it happens, we continue before the write has
	 * completed. Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
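
/* Usage sketch (illustrative): callers that need several indirect
 * accesses in a row should take lp->indirect_lock once and use the
 * *_locked variants, rather than locking per access:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(lp->indirect_lock, flags);
 *	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET, uaw0);
 *	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET, uaw1);
 *	spin_unlock_irqrestore(lp->indirect_lock, flags);
 *
 * temac_do_set_mac_address() below follows exactly this pattern; the
 * uaw0/uaw1 values here are placeholders.
 */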

/*
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_in.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}
/*
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_out.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}
/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/*
 * temac_dma_dcr_in32 - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/*
 * temac_dma_dcr_out32 - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/*
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */
	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	return -1;
}

#endif

/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < lp->rx_bd_num; i++) {
		if (!lp->rx_skb[i])
			break;
		/* Descriptor fields are stored big-endian; convert back */
		dma_unmap_single(ndev->dev.parent,
				 be32_to_cpu(lp->rx_bd_v[i].phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v, lp->tx_bd_p);
}

/*
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
				  sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
		skb = __netdev_alloc_skb_ip_align(ndev,
						  XTE_MAX_JUMBO_FRAME_SIZE,
						  GFP_KERNEL);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
			goto out;
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
	lp->dma_out(lp, TX_CHNL_CTRL,
		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
		    0x00000400 | /* Use 1 Bit Wide Counters. Currently Not Used! */
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL,
		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
		    CHNL_CTRL_IRQ_IOE |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	lp->rx_bd_tail = lp->rx_bd_num - 1;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}

/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* Set up the unicast MAC address filter with the given address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1, so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(ndev, addr->sa_data);
	temac_do_set_mac_address(ndev);
	return 0;
}

static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}

static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver if not already enabled */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};

/*
 * temac_setoptions
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s RX reset timeout!!\n", __func__);
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s TX reset timeout!!\n", __func__);
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"%s DMA reset timeout!!\n", __func__);
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"%s descriptor allocation failed\n", __func__);
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}

static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000:
			mii_speed |= XTE_EMCFG_LINKSPD_1000;
			break;
		case SPEED_100:
			mii_speed |= XTE_EMCFG_LINKSPD_100;
			break;
		case SPEED_10:
			mii_speed |= XTE_EMCFG_LINKSPD_10;
			break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}

#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif
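
/* Illustrative note (not from the original source): these helpers stash
 * the skb pointer in the descriptor's app3/app4 words, splitting it on
 * 64-bit builds, so the round trip is lossless either way:
 *
 *	ptr_to_txbd((void *)skb, bd);
 *	WARN_ON(ptr_from_txbd(bd) != (void *)skb);
 */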

static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		/* Make sure that the other fields are read after bd is
		 * released
		 */
		rmb();
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		/* app0 must be visible last, as it is used to flag
		 * availability of the bd
		 */
		smp_mb();
		cur_p->app0 = 0;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	/* Matches barrier in temac_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}

static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		/* Make sure to read next bd app0 after this one */
		rmb();

		tail++;
		if (tail >= lp->tx_bd_num)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}

static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in temac_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (temac_check_tx_bd_space(lp, num_frag + 1))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}
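	/* Worked example (illustrative, not from the original source):
	 * for TCP over IPv4 without options, checksumming starts at the
	 * TCP header, so csum_start_off = 14 (Ethernet) + 20 (IPv4) = 34,
	 * and the checksum field sits 16 bytes into the TCP header, so
	 * csum_index_off = 50; app1 then holds (34 << 16) | 50 = 0x00220032.
	 */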

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	cur_p->phys = cpu_to_be32(skb_dma_addr);

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
			if (--lp->tx_bd_tail < 0)
				lp->tx_bd_tail = lp->tx_bd_num - 1;
			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			while (--ii >= 0) {
				--frag;
				dma_unmap_single(ndev->dev.parent,
						 be32_to_cpu(cur_p->phys),
						 skb_frag_size(frag),
						 DMA_TO_DEVICE);
				if (--lp->tx_bd_tail < 0)
					lp->tx_bd_tail = lp->tx_bd_num - 1;
				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			}
			dma_unmap_single(ndev->dev.parent,
					 be32_to_cpu(cur_p->phys),
					 skb_headlen(skb), DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			ndev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	/* Mark last fragment with skb address, so it can be consumed
	 * in temac_start_xmit_done()
	 */
	ptr_to_txbd((void *)skb, cur_p);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
	int available;

	if (!lp->rx_skb[lp->rx_bd_ci])
		return 0;
	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
	if (available <= 0)
		available += lp->rx_bd_num;
	return available;
}

static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;
	int rx_bd;
	bool update_tail = false;

	spin_lock_irqsave(&lp->rx_lock, flags);

	/* Process all received buffers, passing them on to the network
	 * stack. After this, the buffer descriptors will be in an
	 * un-allocated state, where no skb is allocated for them, and
	 * they are therefore not available for TEMAC/DMA.
	 */
	do {
		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
		unsigned int bdstat = be32_to_cpu(bd->app0);
		int length;

		/* While this should not normally happen, we can end
		 * here when GFP_ATOMIC allocations fail, and we
		 * therefore have un-allocated buffers.
		 */
		if (!skb)
			break;

		/* Loop over all completed buffer descriptors */
		if (!(bdstat & STS_CTRL_APP0_CMPLT))
			break;

		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		/* The buffer is not valid for DMA anymore */
		bd->phys = 0;
		bd->len = 0;

		length = be32_to_cpu(bd->app4) & 0x3FFF;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {
			/* Convert from device endianness (be32) to cpu
			 * endianness, and if necessary swap the bytes
			 * (back) for proper IP checksum byte order
			 * (be16).
			 */
			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);
		/* The skb buffer is now owned by network stack above */
		lp->rx_skb[lp->rx_bd_ci] = NULL;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		rx_bd = lp->rx_bd_ci;
		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
	} while (rx_bd != lp->rx_bd_tail);

	/* DMA operations will halt when the last buffer descriptor is
	 * processed (ie. the one pointed to by RX_TAILDESC_PTR).
	 * When that happens, no more interrupt events will be
	 * generated. No IRQ_COAL or IRQ_DLY, and not even an
	 * IRQ_ERR. To avoid stalling, we schedule a delayed work
	 * when there is a potential risk of that happening. The work
	 * will call this function, and thus re-schedule itself until
	 * enough buffers are available again.
	 */
	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
		schedule_delayed_work(&lp->restart_work, HZ / 1000);

	/* Allocate new buffers for those buffer descriptors that were
	 * passed to the network stack. Note that GFP_ATOMIC allocations
	 * can fail (e.g. when a larger burst of GFP_ATOMIC
	 * allocations occurs), so while we try to allocate all
	 * buffers in the same interrupt where they were processed, we
	 * continue with what we could get in case of allocation
	 * failure. Allocation of remaining buffers will be retried
	 * in following calls.
	 */
	while (1) {
		struct sk_buff *skb;
		struct cdmac_bd *bd;
		dma_addr_t skb_dma_addr;

		rx_bd = lp->rx_bd_tail + 1;
		if (rx_bd >= lp->rx_bd_num)
			rx_bd = 0;
		bd = &lp->rx_bd_v[rx_bd];

		if (bd->phys)
			break;	/* All skb's allocated */

		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb) {
			dev_warn(&ndev->dev, "skb alloc failed\n");
			break;
		}

		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
						   skb_dma_addr))) {
			dev_kfree_skb_any(skb);
			break;
		}

		bd->phys = cpu_to_be32(skb_dma_addr);
		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
		lp->rx_skb[rx_bd] = skb;

		lp->rx_bd_tail = rx_bd;
		update_tail = true;
	}

	/* Move tail pointer when buffers have been allocated */
	if (update_tail) {
		lp->dma_out(lp, RX_TAILDESC_PTR,
			    lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
	}

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}

/* Function scheduled to ensure a restart in case of DMA halt
 * condition caused by running out of buffer descriptors.
 */
static void ll_temac_restart_work_func(struct work_struct *work)
{
	struct temac_local *lp = container_of(work, struct temac_local,
					      restart_work.work);
	struct net_device *ndev = lp->ndev;

	ll_temac_recv(ndev);
}

static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	} else if (strlen(lp->phy_name) > 0) {
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
		}
		phy_start(phydev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}

static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	dev_dbg(&ndev->dev, "temac_close()\n");

	cancel_delayed_work_sync(&lp->restart_work);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (phydev)
		phy_disconnect(phydev);

	temac_dma_bd_release(ndev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	/* The handlers ignore the irq argument; pass the matching one
	 * for each anyway.
	 */
	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};

/* ---------------------------------------------------------------------
 * ethtool support
 */

static void
ll_temac_ethtools_get_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ering,
				struct kernel_ethtool_ringparam *kernel_ering,
				struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
ll_temac_ethtools_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ering,
				struct kernel_ethtool_ringparam *kernel_ering,
				struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

static int
ll_temac_ethtools_get_coalesce(struct net_device *ndev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
	return 0;
}

static int
ll_temac_ethtools_set_coalesce(struct net_device *ndev,
			       struct ethtool_coalesce *ec,
			       struct kernel_ethtool_coalesce *kernel_coal,
			       struct netlink_ext_ack *extack)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ec->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
	if (ec->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
	/* With typical LocalLink clock speed of 200 MHz and
	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
	 */
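	/* Worked example (illustrative, not from the original source):
	 * a prescaler of 1023 means one delay tick per 1024 bus cycles,
	 * and 1024 / 200 MHz = 5.12 us. Requesting rx_coalesce_usecs = 100
	 * therefore maps to (100 * 100) / 512 = 19 counts, an effective
	 * delay of 19 * 5.12 us, or roughly 97 us.
	 */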
	if (ec->rx_coalesce_usecs)
		lp->coalesce_delay_rx =
			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
	if (ec->tx_coalesce_usecs)
		lp->coalesce_delay_tx =
			min(255U, (ec->tx_coalesce_usecs * 100) / 512);

	return 0;
}

static const struct ethtool_ops temac_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_ringparam = ll_temac_ethtools_get_ringparam,
	.set_ringparam = ll_temac_ethtools_set_ringparam,
	.get_coalesce = ll_temac_ethtools_get_coalesce,
	.set_coalesce = ll_temac_ethtools_set_coalesce,
};

static int temac_probe(struct platform_device *pdev)
{
	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
	struct temac_local *lp;
	struct net_device *ndev;
	u8 addr[ETH_ALEN];
	__be32 *p;
	bool little_endian;
	int rc = 0;

	/* Init network device structure */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &temac_netdev_ops;
	ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
	spin_lock_init(&lp->rx_lock);
	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);

	/* Setup spinlock for synchronization of indirect register access */
	if (pdata) {
		if (!pdata->indirect_lock) {
			dev_err(&pdev->dev,
				"indirect_lock missing in platform_data\n");
			return -EINVAL;
		}
		lp->indirect_lock = pdata->indirect_lock;
	} else {
		lp->indirect_lock = devm_kmalloc(&pdev->dev,
						 sizeof(*lp->indirect_lock),
						 GFP_KERNEL);
		if (!lp->indirect_lock)
			return -ENOMEM;
		spin_lock_init(lp->indirect_lock);
	}

	/* map device registers */
	lp->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map TEMAC registers\n");
		return PTR_ERR(lp->regs);
	}

	/* Select register access functions with the specified
	 * endianness mode. Default for OF devices is big-endian.
	 */
	little_endian = false;
	if (temac_np)
		little_endian = of_property_read_bool(temac_np, "little-endian");
	else if (pdata)
		little_endian = pdata->reg_little_endian;
	if (little_endian) {
		lp->temac_ior = _temac_ior_le;
		lp->temac_iow = _temac_iow_le;
	} else {
		lp->temac_ior = _temac_ior_be;
		lp->temac_iow = _temac_iow_be;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	if (temac_np) {
		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	} else if (pdata) {
		if (pdata->txcsum)
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		if (pdata->rxcsum)
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	}
	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;

	/* Defaults for IRQ delay/coalescing setup. These are
	 * configuration values, so they do not belong in the device tree.
	 */
	lp->coalesce_delay_tx = 0x10;
	lp->coalesce_count_tx = 0x22;
	lp->coalesce_delay_rx = 0xff;
	lp->coalesce_count_rx = 0x07;

	/* Setup LocalLink DMA */
	if (temac_np) {
		/* Find the DMA node, map the DMA registers, and
		 * decode the DMA IRQs.
		 */
		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
		if (!dma_np) {
			dev_err(&pdev->dev, "could not find DMA node\n");
			return -ENODEV;
		}

		/* Setup the DMA register accesses, could be DCR or
		 * memory mapped.
		 */
		if (temac_dcr_setup(lp, pdev, dma_np)) {
			/* no DCR in the device tree, try non-DCR */
			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
						      NULL);
			if (IS_ERR(lp->sdma_regs)) {
				dev_err(&pdev->dev,
					"unable to map DMA registers\n");
				of_node_put(dma_np);
				return PTR_ERR(lp->sdma_regs);
			}
			if (of_property_read_bool(dma_np, "little-endian")) {
				lp->dma_in = temac_dma_in32_le;
				lp->dma_out = temac_dma_out32_le;
			} else {
				lp->dma_in = temac_dma_in32_be;
				lp->dma_out = temac_dma_out32_be;
			}
			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

		/* Finished with the DMA node; drop the reference */
		of_node_put(dma_np);
	} else if (pdata) {
		/* 2nd memory resource specifies DMA registers */
		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(lp->sdma_regs)) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return PTR_ERR(lp->sdma_regs);
		}
		if (pdata->dma_little_endian) {
			lp->dma_in = temac_dma_in32_le;
			lp->dma_out = temac_dma_out32_le;
		} else {
			lp->dma_in = temac_dma_in32_be;
			lp->dma_out = temac_dma_out32_be;
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = platform_get_irq(pdev, 0);
		lp->tx_irq = platform_get_irq(pdev, 1);

		/* IRQ delay/coalescing setup */
		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
			lp->coalesce_count_tx = pdata->tx_irq_count;
		}
		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
			lp->coalesce_count_rx = pdata->rx_irq_count;
		}
	}

	/* Handle errors from the returned DMA RX and TX interrupts */
	if (lp->rx_irq <= 0) {
		rc = lp->rx_irq ?: -EINVAL;
		return dev_err_probe(&pdev->dev, rc,
				     "could not get DMA RX irq\n");
	}
	if (lp->tx_irq <= 0) {
		rc = lp->tx_irq ?: -EINVAL;
		return dev_err_probe(&pdev->dev, rc,
				     "could not get DMA TX irq\n");
	}

	if (temac_np) {
		/* Retrieve the MAC address */
		rc = of_get_mac_address(temac_np, addr);
		if (rc) {
			dev_err(&pdev->dev, "could not find MAC address\n");
			return -ENODEV;
		}
		temac_init_mac_address(ndev, addr);
	} else if (pdata) {
		temac_init_mac_address(ndev, pdata->mac_addr);
	}

	rc = temac_mdio_setup(lp, pdev);
	if (rc)
		dev_warn(&pdev->dev, "error registering MDIO bus\n");

	if (temac_np) {
		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
		if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
	} else if (pdata) {
		snprintf(lp->phy_name, sizeof(lp->phy_name),
			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
		lp->phy_interface = pdata->phy_interface;
	}

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_sysfs_create;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return rc;
}

static void temac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct temac_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
}

static const struct of_device_id temac_of_match[] = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_driver = {
	.probe = temac_probe,
	.remove_new = temac_remove,
	.driver = {
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");