/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);

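/* Ring bookkeeping note: ring->cur and ring->dirty appear to be
 * free-running unsigned indices that are only masked when a descriptor
 * is looked up (assuming XLGMAC_GET_DESC_DATA() in dwc-xlgmac.h masks
 * by the ring size), so the unsigned subtractions below stay correct
 * across wraparound: e.g. cur = 2 and dirty = UINT_MAX - 1 still yield
 * four outstanding descriptors.
 */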
static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return (ring->dma_desc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return (ring->cur - ring->dirty);
}

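/* Flow-control note: a queue stopped here is woken again in
 * xlgmac_tx_poll() once more than XLGMAC_TX_DESC_MIN_FREE descriptors
 * have been reclaimed, so stop and wake operate with a hysteresis
 * window instead of toggling on every freed descriptor.
 */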
static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}

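/* TSO accounting note for xlgmac_prep_tso() below: the hardware
 * replicates the L2/L3/L4 header for every segment after the first,
 * so tx_bytes grows by (gso_segs - 1) * header_len. For example, a
 * 2800-byte TCP payload with an mss of 1400 and a 54-byte header
 * leaves as two wire frames and adds 54 bytes to the byte count.
 */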
static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}

static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}

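/* Sizing note for xlgmac_calc_rx_buf_size() below: for a standard
 * 1500-byte MTU the frame budget is 1500 + 14 (ETH_HLEN) + 4
 * (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes, which is then clamped to
 * [XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE] and rounded up to the next
 * multiple of XLGMAC_RX_BUF_ALIGN (constants presumably defined in
 * dwc-xlgmac.h).
 */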
static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}

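/* Interrupt dispatch note for xlgmac_isr() below: DMA_ISR carries one
 * status bit per DMA channel (bit i for channel i), so the handler
 * walks only the channels whose bit is set and reads that channel's
 * DMA_CH_SR for the per-channel cause before acknowledging it.
 */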
static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}

static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}

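/* Timer note for xlgmac_tx_timer() below: the per-channel Tx timer is
 * armed elsewhere in the driver (the hardware ops transmit path, as
 * suggested by tx_timer_active) to bound Tx completion latency when
 * completion interrupts are coalesced; when it fires it schedules the
 * same NAPI context a hardware interrupt would, so all descriptor
 * cleanup stays in the poll routines.
 */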
static void xlgmac_tx_timer(unsigned long data)
{
	struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xlgmac_tx_timer,
			    (unsigned long)channel);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						  struct xlgmac_pdata,
						  restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channles_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}

static void xlgmac_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *s)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_stats *pstats = &pdata->stats;

	pdata->hw_ops.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;
}

static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_ops->set_mac_address(pdata, netdev->dev_addr);

	return 0;
}

static int xlgmac_ioctl(struct net_device *netdev,
			struct ifreq *ifreq, int cmd)
{
	if (!netif_running(netdev))
		return -ENODEV;

	return 0;
}

static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	int ret;

	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xlgmac_restart_dev(pdata);

	return 0;
}

static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto,
				  u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	set_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto,
				   u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	clear_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xlgmac_poll_controller(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xlgmac_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xlgmac_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int xlgmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_ops->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_ops->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_ops->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_ops->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_ops->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_ops->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_ops->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_ops->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	return 0;
}

static void xlgmac_set_rx_mode(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	hw_ops->config_rx_mode(pdata);
}

static const struct net_device_ops xlgmac_netdev_ops = {
	.ndo_open		= xlgmac_open,
	.ndo_stop		= xlgmac_close,
	.ndo_start_xmit		= xlgmac_xmit,
	.ndo_tx_timeout		= xlgmac_tx_timeout,
	.ndo_get_stats64	= xlgmac_get_stats64,
	.ndo_change_mtu		= xlgmac_change_mtu,
	.ndo_set_mac_address	= xlgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xlgmac_ioctl,
	.ndo_vlan_rx_add_vid	= xlgmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xlgmac_poll_controller,
#endif
	.ndo_set_features	= xlgmac_set_features,
	.ndo_set_rx_mode	= xlgmac_set_rx_mode,
};

const struct net_device_ops *xlgmac_get_netdev_ops(void)
{
	return &xlgmac_netdev_ops;
}

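/* Refill note for xlgmac_rx_refresh() below: descriptors between
 * ring->dirty and ring->cur have been consumed by the stack; each one
 * is re-armed with a fresh buffer and handed back to hardware by
 * advancing the Rx tail pointer to the last re-armed entry, with a
 * write barrier ordering the descriptor writes before the doorbell.
 */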
static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	while (ring->dirty != ring->cur) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Reset desc_data values */
		desc_ops->unmap_desc_data(pdata, desc_data);

		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
			break;

		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}

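/* SKB construction note for xlgmac_create_skb() below: the device
 * splits each frame into a small header buffer and a page-backed data
 * buffer. The header (or the whole frame, if it fits) is copied into
 * the skb linear area; any remainder is attached zero-copy as a page
 * fragment and the page reference is donated to the skb.
 */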
static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
					 struct napi_struct *napi,
					 struct xlgmac_desc_data *desc_data,
					 unsigned int len)
{
	unsigned int copy_len;
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
				      desc_data->rx.hdr.dma_off,
				      desc_data->rx.hdr.dma_len,
				      DMA_FROM_DEVICE);

	packet = page_address(desc_data->rx.hdr.pa.pages) +
		 desc_data->rx.hdr.pa.pages_offset;
	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      desc_data->rx.buf.dma_base,
					      desc_data->rx.buf.dma_off,
					      desc_data->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				desc_data->rx.buf.pa.pages,
				desc_data->rx.buf.pa.pages_offset,
				len, desc_data->rx.buf.dma_len);
		desc_data->rx.buf.pa.pages = NULL;
	}

	return skb;
}

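/* Completion note for xlgmac_tx_poll() below: at most
 * XLGMAC_TX_DESC_MAX_PROC descriptors are reclaimed per call, packet
 * and byte totals are reported through netdev_tx_completed_queue() for
 * byte queue limits, and a queue stopped in
 * xlgmac_maybe_stop_tx_queue() is woken once enough descriptors are
 * free again.
 */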
static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}

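/* Receive note for xlgmac_rx_poll() below: a frame can span several
 * descriptors (plus a trailing context descriptor), so if the NAPI
 * budget runs out mid-frame the partial skb, running length and error
 * flag are parked in the descriptor's state area and restored on the
 * next poll.
 */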
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;
	while (packet_count < budget) {
		/* First time in loop see if we need to restore state */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}

static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						struct xlgmac_channel,
						napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}

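/* Budget note for xlgmac_all_poll() below: the single NAPI budget is
 * divided evenly across the Rx rings and the channels are swept
 * repeatedly until either the budget is spent or a full sweep makes no
 * progress, so one busy ring cannot starve the others.
 */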
static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(napi,
						struct xlgmac_pdata,
						napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}