/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <net/vxlan.h>

#include "xgbe.h"
#include "xgbe-common.h"
static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;
#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_info_threshold,
		 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
		 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, 0644);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, 0644);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif
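/* Example (hypothetical values): when ECC support is compiled in, the
 * thresholds above can be tuned at load time, e.g.
 *
 *   modprobe amd-xgbe ecc_sec_warn_threshold=5000 ecc_sec_period=300
 *
 * and, because the parameters are registered with mode 0644, adjusted
 * later through /sys/module/amd_xgbe/parameters/.
 */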
static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}
static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
		if (!pdata->channel[i])
			continue;

		kfree(pdata->channel[i]->rx_ring);
		kfree(pdata->channel[i]->tx_ring);
		kfree(pdata->channel[i]);

		pdata->channel[i] = NULL;
	}

	pdata->channel_count = 0;
}
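/* Channel and ring allocation below is NUMA-aware: each channel is assigned
 * a CPU spread across the device's NUMA node (cpumask_local_spread()) and
 * its memory is allocated on that CPU's node, falling back to any node if
 * the node-local allocation fails (see xgbe_alloc_node() above).
 */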
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	unsigned int count, i;
	unsigned int cpu;
	int node;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
	for (i = 0; i < count; i++) {
		/* Attempt to use a CPU on the node the device is on */
		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));

		/* Set the allocation node based on the returned CPU */
		node = cpu_to_node(cpu);

		channel = xgbe_alloc_node(sizeof(*channel), node);
		if (!channel)
			goto err_mem;
		pdata->channel[i] = channel;

		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);
		channel->node = node;
		cpumask_set_cpu(cpu, &channel->affinity_mask);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->tx_ring = ring;
		}

		if (i < pdata->rx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->rx_ring = ring;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: cpu=%u, node=%d\n", channel->name, cpu, node);

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_count = count;

	return 0;

err_mem:
	xgbe_free_channels(pdata);

	return -ENOMEM;
}
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}
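/* The ring indices 'cur' and 'dirty' are free-running unsigned counters:
 * 'cur' counts descriptors handed to the hardware and 'dirty' counts
 * descriptors reclaimed, so (cur - dirty) is the number in flight even
 * after the counters wrap.  A small worked example: with rdesc_count=512,
 * cur=515 and dirty=510, xgbe_tx_avail_desc() returns 512 - (515 - 510)
 * = 507 free descriptors, and xgbe_rx_dirty_desc() would report 5.
 */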
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
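/* Worked example (assuming XGBE_RX_BUF_ALIGN is 64): for an MTU of 1500,
 * rx_buf_size = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
 * = 1522, which survives the clamp and is then rounded up to the next
 * multiple of the alignment, giving a 1536-byte receive buffer.
 */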
static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}
static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}
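/* The two helpers below implement a simple windowed error counter for the
 * ECC single-error-correct (SEC) and double-error-detect (DED) interrupts:
 * errors arriving within the configured period are accumulated, while the
 * first error after the period expires restarts both the window and the
 * count.  Crossing the SEC warning threshold disables further SEC
 * interrupts for that memory; crossing the DED threshold stops the device.
 */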
static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_sec_period * HZ);
		*count = 1;
	}

	if (*count > ecc_sec_info_threshold)
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed informational threshold\n",
			      area);

	if (*count > ecc_sec_warn_threshold) {
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed warning threshold\n",
			      area);
		return true;
	}

	return false;
}

static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_ded_period * HZ);
		*count = 1;
	}

	if (*count > ecc_ded_threshold) {
		netdev_alert(pdata->netdev,
			     "%s ECC detected errors exceed threshold\n",
			     area);
		return true;
	}

	return false;
}
static void xgbe_ecc_isr_task(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
	unsigned int ecc_isr;
	bool stop = false;

	/* Mask status with only the interrupts we care about */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
				     &pdata->tx_ded_count, "TX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
				     &pdata->rx_ded_count, "RX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
				     &pdata->desc_ded_count,
				     "descriptor cache");
	}

	if (stop) {
		pdata->hw_if.disable_ecc_ded(pdata);
		schedule_work(&pdata->stopdev_work);
		goto out;
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
				 &pdata->tx_sec_count, "TX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
				 &pdata->rx_sec_count, "RX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
				 &pdata->desc_sec_count, "descriptor cache"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
	/* Clear all ECC interrupts */
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support)
		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}
static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_ecc);
	else
		xgbe_ecc_isr_task((unsigned long)pdata);

	return IRQ_HANDLED;
}
static void xgbe_isr_task(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr, mac_mdioisr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel[i];

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
			  mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_TSSR=%#010x\n", mac_tssr);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
					   SNGLCOMPINT))
				complete(&pdata->mdio_complete);
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(pdata);

	/* If there is not a separate ECC irq, handle it here */
	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
		xgbe_ecc_isr_task((unsigned long)pdata);

	/* If there is not a separate I2C irq, handle it here */
	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
		pdata->i2c_if.i2c_isr(pdata);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support) {
		unsigned int reissue_mask;

		reissue_mask = 1 << 0;
		if (!pdata->per_channel_irq)
			reissue_mask |= 0xffff << 4;

		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
	}
}
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_dev);
	else
		xgbe_isr_task((unsigned long)pdata);

	return IRQ_HANDLED;
}
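/* Depending on the platform (pdata->isr_as_tasklet), the handlers above
 * either run their work directly in the hard-irq handler or defer it to a
 * tasklet so that the interrupt handler itself stays short.
 */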
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}
static void xgbe_tx_timer(struct timer_list *t)
{
	struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}
static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(struct timer_list *t)
{
	struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
	struct xgbe_channel *channel;
	unsigned int i;

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);

	if (!pdata->tx_usecs)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring || channel->tx_timer_active)
			break;
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	timer_setup(&pdata->service_timer, xgbe_service_timer, 0);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		/* Deactivate the Tx timer */
		del_timer_sync(&channel->tx_timer);
		channel->tx_timer_active = 0;
	}
}
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
	hw_feat->vxn         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "Hardware features:\n");

		/* Hardware feature register 0 */
		dev_dbg(pdata->dev, "  1GbE support              : %s\n",
			hw_feat->gmii ? "yes" : "no");
		dev_dbg(pdata->dev, "  VLAN hash filter          : %s\n",
			hw_feat->vlhash ? "yes" : "no");
		dev_dbg(pdata->dev, "  MDIO interface            : %s\n",
			hw_feat->sma ? "yes" : "no");
		dev_dbg(pdata->dev, "  Wake-up packet support    : %s\n",
			hw_feat->rwk ? "yes" : "no");
		dev_dbg(pdata->dev, "  Magic packet support      : %s\n",
			hw_feat->mgk ? "yes" : "no");
		dev_dbg(pdata->dev, "  Management counters       : %s\n",
			hw_feat->mmc ? "yes" : "no");
		dev_dbg(pdata->dev, "  ARP offload               : %s\n",
			hw_feat->aoe ? "yes" : "no");
		dev_dbg(pdata->dev, "  IEEE 1588-2008 Timestamp  : %s\n",
			hw_feat->ts ? "yes" : "no");
		dev_dbg(pdata->dev, "  Energy Efficient Ethernet : %s\n",
			hw_feat->eee ? "yes" : "no");
		dev_dbg(pdata->dev, "  TX checksum offload       : %s\n",
			hw_feat->tx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, "  RX checksum offload       : %s\n",
			hw_feat->rx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, "  Additional MAC addresses  : %u\n",
			hw_feat->addn_mac);
		dev_dbg(pdata->dev, "  Timestamp source          : %s\n",
			(hw_feat->ts_src == 1) ? "internal" :
			(hw_feat->ts_src == 2) ? "external" :
			(hw_feat->ts_src == 3) ? "internal/external" : "n/a");
		dev_dbg(pdata->dev, "  SA/VLAN insertion         : %s\n",
			hw_feat->sa_vlan_ins ? "yes" : "no");
		dev_dbg(pdata->dev, "  VXLAN/NVGRE support       : %s\n",
			hw_feat->vxn ? "yes" : "no");

		/* Hardware feature register 1 */
		dev_dbg(pdata->dev, "  RX fifo size              : %u\n",
			hw_feat->rx_fifo_size);
		dev_dbg(pdata->dev, "  TX fifo size              : %u\n",
			hw_feat->tx_fifo_size);
		dev_dbg(pdata->dev, "  IEEE 1588 high word       : %s\n",
			hw_feat->adv_ts_hi ? "yes" : "no");
		dev_dbg(pdata->dev, "  DMA width                 : %u\n",
			hw_feat->dma_width);
		dev_dbg(pdata->dev, "  Data Center Bridging      : %s\n",
			hw_feat->dcb ? "yes" : "no");
		dev_dbg(pdata->dev, "  Split header              : %s\n",
			hw_feat->sph ? "yes" : "no");
		dev_dbg(pdata->dev, "  TCP Segmentation Offload  : %s\n",
			hw_feat->tso ? "yes" : "no");
		dev_dbg(pdata->dev, "  Debug memory interface    : %s\n",
			hw_feat->dma_debug ? "yes" : "no");
		dev_dbg(pdata->dev, "  Receive Side Scaling      : %s\n",
			hw_feat->rss ? "yes" : "no");
		dev_dbg(pdata->dev, "  Traffic Class count       : %u\n",
			hw_feat->tc_cnt);
		dev_dbg(pdata->dev, "  Hash table size           : %u\n",
			hw_feat->hash_table_size);
		dev_dbg(pdata->dev, "  L3/L4 Filters             : %u\n",
			hw_feat->l3l4_filter_num);

		/* Hardware feature register 2 */
		dev_dbg(pdata->dev, "  RX queue count            : %u\n",
			hw_feat->rx_q_cnt);
		dev_dbg(pdata->dev, "  TX queue count            : %u\n",
			hw_feat->tx_q_cnt);
		dev_dbg(pdata->dev, "  RX DMA channel count      : %u\n",
			hw_feat->rx_ch_cnt);
		dev_dbg(pdata->dev, "  TX DMA channel count      : %u\n",
			hw_feat->tx_ch_cnt);
		dev_dbg(pdata->dev, "  PPS outputs               : %u\n",
			hw_feat->pps_out_num);
		dev_dbg(pdata->dev, "  Auxiliary snapshot inputs : %u\n",
			hw_feat->aux_snap_num);
	}
}
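/* Worked example of the translations above: a RXFIFOSIZE field of 9 reads
 * back as 1 << (9 + 7) = 65536 bytes, and an ADDR64 field of 1 reports a
 * 40-bit DMA address width; the queue/channel/TC counts are stored by the
 * hardware as N-1 and are corrected by the increments above.
 */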
static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	if (!pdata->vxlan_offloads_set)
		return;

	netdev_info(netdev, "disabling VXLAN offloads\n");

	netdev->hw_enc_features &= ~(NETIF_F_SG |
				     NETIF_F_IP_CSUM |
				     NETIF_F_IPV6_CSUM |
				     NETIF_F_RXCSUM |
				     NETIF_F_TSO |
				     NETIF_F_TSO6 |
				     NETIF_F_GRO |
				     NETIF_F_GSO_UDP_TUNNEL |
				     NETIF_F_GSO_UDP_TUNNEL_CSUM);

	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL |
			      NETIF_F_GSO_UDP_TUNNEL_CSUM);

	pdata->vxlan_offloads_set = 0;
}

static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata)
{
	if (!pdata->vxlan_port_set)
		return;

	pdata->hw_if.disable_vxlan(pdata);

	pdata->vxlan_port_set = 0;
	pdata->vxlan_port = 0;
}

static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata)
{
	xgbe_disable_vxlan_offloads(pdata);

	xgbe_disable_vxlan_hw(pdata);
}
static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	if (pdata->vxlan_offloads_set)
		return;

	netdev_info(netdev, "enabling VXLAN offloads\n");

	netdev->hw_enc_features |= NETIF_F_SG |
				   NETIF_F_IP_CSUM |
				   NETIF_F_IPV6_CSUM |
				   NETIF_F_RXCSUM |
				   NETIF_F_TSO |
				   NETIF_F_TSO6 |
				   NETIF_F_GRO |
				   pdata->vxlan_features;

	netdev->features |= pdata->vxlan_features;

	pdata->vxlan_offloads_set = 1;
}

static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata)
{
	struct xgbe_vxlan_data *vdata;

	if (pdata->vxlan_port_set)
		return;

	if (list_empty(&pdata->vxlan_ports))
		return;

	vdata = list_first_entry(&pdata->vxlan_ports,
				 struct xgbe_vxlan_data, list);

	pdata->vxlan_port_set = 1;
	pdata->vxlan_port = be16_to_cpu(vdata->port);

	pdata->hw_if.enable_vxlan(pdata);
}

static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata)
{
	/* VXLAN acceleration desired? */
	if (!pdata->vxlan_features)
		return;

	/* VXLAN acceleration possible? */
	if (pdata->vxlan_force_disable)
		return;

	xgbe_enable_vxlan_hw(pdata);

	xgbe_enable_vxlan_offloads(pdata);
}

static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata)
{
	xgbe_disable_vxlan_hw(pdata);

	if (pdata->vxlan_features)
		xgbe_enable_vxlan_offloads(pdata);

	pdata->vxlan_force_disable = 0;
}
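/* The hardware can match only a single VXLAN UDP port at a time, so the
 * enable path above programs the first entry on the vxlan_ports list and
 * toggles the offload feature flags as ports come and go; forcing offloads
 * off (vxlan_force_disable) takes precedence over everything else.
 */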
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
	tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
		     (unsigned long)pdata);

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev_name(netdev), pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
				       0, pdata->ecc_name, pdata);
		if (ret) {
			netdev_alert(netdev, "error requesting ecc irq %d\n",
				     pdata->ecc_irq);
			goto err_dev_irq;
		}
	}

	if (!pdata->per_channel_irq)
		return 0;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_dma_irq;
		}

		irq_set_affinity_hint(channel->dma_irq,
				      &channel->affinity_mask);
	}

	return 0;

err_dma_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--; i < pdata->channel_count; i--) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	tasklet_kill(&pdata->tasklet_dev);
	tasklet_kill(&pdata->tasklet_ecc);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}
static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}
static void xgbe_free_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);
}

static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	if (pdata->new_tx_ring_count) {
		pdata->tx_ring_count = pdata->new_tx_ring_count;
		pdata->tx_q_count = pdata->tx_ring_count;
		pdata->new_tx_ring_count = 0;
	}

	if (pdata->new_rx_ring_count) {
		pdata->rx_ring_count = pdata->new_rx_ring_count;
		pdata->new_rx_ring_count = 0;
	}

	/* Calculate the Rx buffer size before allocating rings */
	pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		return ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	/* Initialize the service and Tx timers */
	xgbe_init_timers(pdata);

	return 0;

err_channels:
	xgbe_free_memory(pdata);

	return ret;
}
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	/* Set the number of queues */
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real tx queue count\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		netdev_err(netdev, "error setting real rx queue count\n");
		return ret;
	}

	/* Set RSS lookup table data for programming */
	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	udp_tunnel_get_rx_info(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	clear_bit(XGBE_STOPPED, &pdata->dev_state);

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
		return;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(pdata->netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	xgbe_reset_vxlan_accel(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	set_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_stop\n");
}
static void xgbe_stopdev(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   stopdev_work);

	rtnl_lock();

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	rtnl_unlock();

	netdev_alert(pdata->netdev, "device stopped\n");
}

void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_memory(pdata);
	xgbe_alloc_memory(pdata);

	xgbe_start(pdata);
}

void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}
static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	if (!pdata->tx_tstamp_skb)
		goto unlock;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	pdata->tx_tstamp_skb = NULL;

unlock:
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}
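/* Note the deliberate case fall-through above: each PTP v2 UDP filter sets
 * TSVER2ENA and then falls into the corresponding PTP v1 case so that the
 * shared IPv4/IPv6 and event-type bits are programmed only once.
 */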
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	skb_tx_timestamp(skb);
}
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
		packet->header_len = skb_inner_transport_offset(skb) +
				     inner_tcp_hdrlen(skb);
		packet->tcp_header_len = inner_tcp_hdrlen(skb);
	} else {
		packet->header_len = skb_transport_offset(skb) +
				     tcp_hdrlen(skb);
		packet->tcp_header_len = tcp_hdrlen(skb);
	}
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}
static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
{
	struct xgbe_vxlan_data *vdata;

	if (pdata->vxlan_force_disable)
		return false;

	if (!skb->encapsulation)
		return false;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (ip_hdr(skb)->protocol != IPPROTO_UDP)
			return false;
		break;

	case htons(ETH_P_IPV6):
		if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
			return false;
		break;

	default:
		return false;
	}

	/* See if we have the UDP port in our list */
	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
		if ((skb->protocol == htons(ETH_P_IP)) &&
		    (vdata->sa_family == AF_INET) &&
		    (vdata->port == udp_hdr(skb)->dest))
			return true;
		else if ((skb->protocol == htons(ETH_P_IPV6)) &&
			 (vdata->sa_family == AF_INET6) &&
			 (vdata->port == udp_hdr(skb)->dest))
			return true;
	}

	return false;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (xgbe_is_vxlan(pdata, skb))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}
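/* Worked example for the descriptor accounting above: a TSO skb whose MSS
 * differs from ring->tx.cur_mss, with a 2000-byte linear area and one
 * 4000-byte fragment, needs 1 context descriptor + 1 TSO header descriptor
 * + 1 descriptor per XGBE_TX_MAX_BUF_SIZE-sized chunk of each buffer;
 * assuming both buffers fit in one chunk each, rdesc_count comes to 4.
 */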
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	/* Create the various names based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
		 netdev_name(netdev));

	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		return -ENOMEM;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_dev_wq;
	}

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		goto err_an_wq;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_an_wq;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

	ret = xgbe_alloc_memory(pdata);
	if (ret)
		goto err_ptpclk;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_mem;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;

err_mem:
	xgbe_free_memory(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_an_wq:
	destroy_workqueue(pdata->an_workqueue);

err_dev_wq:
	destroy_workqueue(pdata->dev_workqueue);

	return ret;
}
static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	/* Stop the device */
	xgbe_stop(pdata);

	xgbe_free_memory(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	return 0;
}
static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	netdev_tx_t ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel[skb->queue_mapping];
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}
static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}
static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static void xgbe_tx_timeout(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}
static void xgbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);
}
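
/* VLAN filtering is done in hardware with a hash table rather than a
 * perfect filter, so adding or removing a VID just toggles its bit in
 * active_vlans and regenerates the hash table from the remaining bits.
 */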
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
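
/* Netpoll hook: with per-channel interrupts each channel's DMA ISR is
 * invoked directly; otherwise the shared device ISR is called with its
 * interrupt temporarily disabled to simulate an interrupt.
 */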
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			xgbe_dma_isr(channel->dma_irq, channel);
		}
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
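
/* ndo_setup_tc callback. Only mqprio offload is supported: the requested
 * number of traffic classes is validated against the hardware TC count
 * and then programmed via config_tc.
 */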
static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	tc = mqprio->num_tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}
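
/* ndo_fix_features callback. The VXLAN offload flags are interdependent,
 * so requests are massaged into a consistent set: the tunnel GSO and Rx
 * port offload bits travel together, tunnel checksumming follows the
 * plain IP checksum setting, and everything is cleared if the hardware
 * lacks VXLAN support or acceleration has been force-disabled.
 */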
static netdev_features_t xgbe_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	netdev_features_t vxlan_base, vxlan_mask;

	vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
	vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM;

	pdata->vxlan_features = features & vxlan_mask;

	/* Only fix VXLAN-related features */
	if (!pdata->vxlan_features)
		return features;

	/* If VXLAN isn't supported then clear any features:
	 *   This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets
	 *   automatically set if ndo_udp_tunnel_add is set.
	 */
	if (!pdata->hw_feat.vxn)
		return features & ~vxlan_mask;

	/* VXLAN CSUM requires VXLAN base */
	if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
	    !(features & NETIF_F_GSO_UDP_TUNNEL)) {
		netdev_notice(netdev,
			      "forcing tx udp tunnel support\n");
		features |= NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Can't do one without doing the other */
	if ((features & vxlan_base) != vxlan_base) {
		netdev_notice(netdev,
			      "forcing both tx and rx udp tunnel support\n");
		features |= vxlan_base;
	}

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming on\n");
			features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	} else {
		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming off\n");
			features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	}

	pdata->vxlan_features = features & vxlan_mask;

	/* Adjust UDP Tunnel based on current state */
	if (pdata->vxlan_force_disable) {
		netdev_notice(netdev,
			      "VXLAN acceleration disabled, turning off udp tunnel features\n");
		features &= ~vxlan_mask;
	}

	return features;
}
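
/* ndo_set_features callback. The previously programmed feature set is
 * compared against the requested one and only the deltas are applied,
 * enabling or disabling RSS, Rx checksum offload, VLAN stripping and
 * filtering, and VXLAN acceleration as needed.
 */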
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	netdev_features_t udp_tunnel;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
	udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel)
		xgbe_enable_vxlan_accel(pdata);
	else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel)
		xgbe_disable_vxlan_accel(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}
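
/* UDP tunnel (VXLAN) port tracking. Offload is programmed for one port
 * at a time, so the driver keeps a list of all advertised ports; if an
 * allocation failure ever makes that list untrustworthy, acceleration is
 * force-disabled rather than risk offloading the wrong port.
 */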
static void xgbe_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_vxlan_data *vdata;

	if (!pdata->hw_feat.vxn)
		return;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	pdata->vxlan_port_count++;

	netif_dbg(pdata, drv, netdev,
		  "adding VXLAN tunnel, family=%hx/port=%hx\n",
		  ti->sa_family, be16_to_cpu(ti->port));

	if (pdata->vxlan_force_disable)
		return;

	vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC);
	if (!vdata) {
		/* Can no longer properly track VXLAN ports */
		pdata->vxlan_force_disable = 1;
		netif_dbg(pdata, drv, netdev,
			  "internal error, disabling VXLAN accelerations\n");

		xgbe_disable_vxlan_accel(pdata);

		return;
	}

	vdata->sa_family = ti->sa_family;
	vdata->port = ti->port;

	list_add_tail(&vdata->list, &pdata->vxlan_ports);

	/* First port added? */
	if (pdata->vxlan_port_count == 1)
		xgbe_enable_vxlan_accel(pdata);
}
static void xgbe_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_vxlan_data *vdata;

	if (!pdata->hw_feat.vxn)
		return;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	netif_dbg(pdata, drv, netdev,
		  "deleting VXLAN tunnel, family=%hx/port=%hx\n",
		  ti->sa_family, be16_to_cpu(ti->port));

	/* Don't need safe version since loop terminates with deletion */
	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
		if (vdata->sa_family != ti->sa_family)
			continue;

		if (vdata->port != ti->port)
			continue;

		list_del(&vdata->list);
		kfree(vdata);

		break;
	}

	pdata->vxlan_port_count--;
	if (!pdata->vxlan_port_count) {
		xgbe_reset_vxlan_accel(pdata);

		return;
	}

	if (pdata->vxlan_force_disable)
		return;

	/* See if VXLAN tunnel id needs to be changed */
	vdata = list_first_entry(&pdata->vxlan_ports,
				 struct xgbe_vxlan_data, list);
	if (pdata->vxlan_port == be16_to_cpu(vdata->port))
		return;

	pdata->vxlan_port = be16_to_cpu(vdata->port);
	pdata->hw_if.set_vxlan_id(pdata);
}
static netdev_features_t xgbe_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open = xgbe_open,
	.ndo_stop = xgbe_close,
	.ndo_start_xmit = xgbe_xmit,
	.ndo_set_rx_mode = xgbe_set_rx_mode,
	.ndo_set_mac_address = xgbe_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = xgbe_ioctl,
	.ndo_change_mtu = xgbe_change_mtu,
	.ndo_tx_timeout = xgbe_tx_timeout,
	.ndo_get_stats64 = xgbe_get_stats64,
	.ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xgbe_poll_controller,
#endif
	.ndo_setup_tc = xgbe_setup_tc,
	.ndo_fix_features = xgbe_fix_features,
	.ndo_set_features = xgbe_set_features,
	.ndo_udp_tunnel_add = xgbe_udp_tunnel_add,
	.ndo_udp_tunnel_del = xgbe_udp_tunnel_del,
	.ndo_features_check = xgbe_features_check,
};
const struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return &xgbe_netdev_ops;
}
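
/* The accessor above keeps xgbe_netdev_ops private to this file; it is
 * typically consumed by the probe path, e.g.:
 *
 *	netdev->netdev_ops = xgbe_get_netdev_ops();
 */

/* Re-arm Rx descriptors the driver has finished processing: each dirty
 * entry gets a fresh buffer mapping and a reset descriptor, and the Rx
 * tail pointer register is advanced so the DMA engine can reuse them.
 */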
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}
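
/* Allocate an skb for a received packet. The hardware writes the frame
 * header (and possibly the whole frame) into a separate header buffer;
 * that part is copied into the skb's linear area, while any remaining
 * data is attached later as a page fragment to avoid a second copy.
 */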
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Pull in the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	skb_copy_to_linear_data(skb, packet, len);
	skb_put(skb, len);

	return skb;
}
static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
		return 0;

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.hdr.dma_len;

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}
static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.buf.dma_len;

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return rdata->rx.len - len;
}
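
/* Reclaim completed Tx descriptors, at most XGBE_TX_DESC_MAX_PROC per
 * call so a single channel cannot monopolize the NAPI poll. The queue is
 * woken once enough descriptors (XGBE_TX_DESC_MIN_FREE) are free again.
 */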
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}
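
/* Receive one NAPI budget's worth of packets. A packet may span several
 * descriptors (split header, data fragments and trailing context
 * descriptors for things like timestamps), so partially assembled skb
 * state is saved in the ring entry and restored on the next poll if the
 * budget runs out mid-packet.
 */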
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int last, error, context_next, context;
	unsigned int len, buf1_len, buf2_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	last = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR(" cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				      LAST);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((!last || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
			len += buf2_len;

			if (buf2_len > rdata->rx.buf.dma_len) {
				/* Hardware inconsistency within the descriptors
				 * that has resulted in a length underflow.
				 */
				error = 1;
				goto skip_data;
			}

			if (!skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (buf2_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}
skip_data:
		if (!last || context_next)
			goto read_again;

		if (!skb || error) {
			dev_kfree_skb(skb);
			goto next_packet;
		}

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, TNP)) {
			skb->encapsulation = 1;

			if (XGMAC_GET_BITS(packet->attributes,
					   RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
				skb->csum_level = 1;
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}
	/* Check if we need to save state before leaving */
	if (received && (!last || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}
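
/* NAPI poll handler used in per-channel interrupt mode: services exactly
 * one channel and re-enables that channel's interrupt once the budget is
 * not exhausted.
 */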
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}
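
/* NAPI poll handler for the shared-interrupt case: the budget is divided
 * evenly across the Rx rings (e.g. a budget of 64 across 4 rings gives
 * each ring 16) and the channels are swept repeatedly until either the
 * budget is consumed or a full sweep makes no progress.
 */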
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];

			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
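
/* The debug helpers below dump raw descriptor words and packet contents
 * via netdev_dbg(); they are driven by the netif_msg_* message level bits.
 */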
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}
void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char buffer[128];
	unsigned int i;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0; i < skb->len; i += 32) {
		unsigned int len = min(skb->len - i, 32U);

		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
				   buffer, sizeof(buffer), false);
		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
	}

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}