/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of
 *     Synopsys, Inc. unless otherwise expressly agreed to in writing
 *     between Synopsys and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for
 *     Licensed Product with Synopsys or any supplement thereto.  Permission
 *     is hereby granted, free of charge, to any person obtaining a copy of
 *     this software annotated with this license and the Software, to deal
 *     in the Software without restriction, including without limitation
 *     the rights to use, copy, modify, merge, publish, distribute,
 *     sublicense, and/or sell copies of the Software, and to permit
 *     persons to whom the Software is furnished to do so, subject to the
 *     following conditions:
 *
 *     The above copyright notice and this permission notice shall be
 *     included in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *     FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
 *     SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 *     EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 *     PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 *     PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 *     OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *     USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 *     DAMAGE.
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of
 *     Synopsys, Inc. unless otherwise expressly agreed to in writing
 *     between Synopsys and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for
 *     Licensed Product with Synopsys or any supplement thereto.  Permission
 *     is hereby granted, free of charge, to any person obtaining a copy of
 *     this software annotated with this license and the Software, to deal
 *     in the Software without restriction, including without limitation
 *     the rights to use, copy, modify, merge, publish, distribute,
 *     sublicense, and/or sell copies of the Software, and to permit
 *     persons to whom the Software is furnished to do so, subject to the
 *     following conditions:
 *
 *     The above copyright notice and this permission notice shall be
 *     included in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *     FOR A PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL
 *     SYNOPSYS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 *     EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 *     PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 *     PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 *     OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *     USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 *     DAMAGE.
 */
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>

#include "xgbe.h"
#include "xgbe-common.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/* Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;
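
	/* Worked example (illustrative clock rate, not a driver default):
	 * with a 125 MHz system clock, rate / 10^6 = 125 cycles per usec,
	 * so usec = 100 yields (100 * 125) / 256 = 48 watchdog units.
	 */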
	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}
static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/* Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);
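
	/* Inverse of xgbe_usec_to_riwt(): with the same illustrative 125 MHz
	 * clock, riwt = 48 yields (48 * 256) / 125 = 98 usecs; the round
	 * trip is inexact because both conversions use integer division.
	 */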
	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}
static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
				       pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
					       PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
					       PBL, pbl);
	}

	return 0;
}
static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
	}
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
	}

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mutex_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		usleep_range(1000, 1500);
	}

	ret = -EBUSY;

unlock:
	mutex_unlock(&pdata->rss_mutex);

	return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return xgbe_write_rss_lookup_table(pdata);
}

static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	if (pdata->netdev->features & NETIF_F_RXHASH)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
}
static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
			      unsigned int queue)
{
	unsigned int prio, tc;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		/* Does this queue handle the priority? */
		if (pdata->prio2q_map[prio] != queue)
			continue;

		/* Get the Traffic Class for this priority */
		tc = pdata->ets->prio_tc[prio];

		/* Check if PFC is enabled for this traffic class */
		if (pdata->pfc->pfc_en & (1 << tc))
			return true;
	}

	return false;
}

static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
{
	/* Program the VXLAN port */
	XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
		  pdata->vxlan_port);
}

static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.vxn)
		return;

	/* Program the VXLAN port */
	xgbe_set_vxlan_id(pdata);

	/* Allow for IPv6/UDP zero-checksum VXLAN packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);

	/* Enable VXLAN tunneling mode */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
}

static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.vxn)
		return;

	/* Disable tunneling mode */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);

	/* Clear IPv6/UDP zero-checksum VXLAN packets setting */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);

	/* Clear the VXLAN port */
	XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);

	netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}

static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;

	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
		return max_q_count;
	else
		return min_t(unsigned int, pdata->tx_q_count, max_q_count);
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int reg, reg_val;
	unsigned int i, q_count;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	q_count = xgbe_get_fc_queue_count(pdata);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;
	struct ieee_ets *ets = pdata->ets;
	unsigned int reg, reg_val;
	unsigned int i, q_count;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		if (pdata->rx_rfd[i]) {
			/* Flow control thresholds are established */
			if (pfc && ets) {
				if (xgbe_is_pfc_queue(pdata, i))
					ehfc = 1;
			} else {
				ehfc = 1;
			}
		}

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		netif_dbg(pdata, drv, pdata->netdev,
			  "flow control %s for RXq%u\n",
			  ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	q_count = xgbe_get_fc_queue_count(pdata);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->tx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->rx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
			   (pfc && pfc->pfc_en) ? 1 : 0);
}
static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i, ver;

	/* Set the interrupt mode if supported */
	if (pdata->channel_irq_mode)
		XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
				   pdata->channel_irq_mode);

	ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		/* Clear all the interrupts which are set */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
				  XGMAC_DMA_IOREAD(channel, DMA_CH_SR));

		/* Clear all interrupt enable bits */
		channel->curr_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		if (ver < 0x21) {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
		} else {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
		}
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts in edge triggered
			 *         mode)
			 */
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
					       DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts in edge triggered
			 *          mode)
			 */
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
					       DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
	}
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

	/* Enable MDIO single command completion interrupt */
	XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}

static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int ecc_isr, ecc_ier = 0;

	if (!pdata->vdata->ecc_support)
		return;

	/* Clear all the interrupts which are set */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Enable ECC interrupts */
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
{
	unsigned int ecc_ier;

	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

	/* Disable ECC DED interrupts */
	XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
	XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
				 enum xgbe_ecc_sec sec)
{
	unsigned int ecc_ier;

	ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

	/* Disable ECC SEC interrupt */
	switch (sec) {
	case XGBE_ECC_SEC_TX:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
		break;
	case XGBE_ECC_SEC_RX:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
		break;
	case XGBE_ECC_SEC_DESC:
		XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
		break;
	}

	XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}
static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}

static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= CRC32_POLY_LE;
	}

	return crc;
}
static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
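
		/* vlan_hash_table is a 16-bit register field, so only the
		 * top four bits of the inverted, bit-reversed CRC (hence
		 * the >> 28) are used: each VLAN ID selects one of 16 bins.
		 */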
		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");

	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			xgbe_enable_rx_vlan_filtering(pdata);
	}

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");

	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}
static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}
static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
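
	/* Worked example: a 256-entry hash table gives
	 * hash_table_shift = 26 - (256 >> 7) = 24, so the top 8 bits of
	 * the CRC pick one of 256 bins, held in 256 / 32 = 8 registers.
	 */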
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}

static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}
static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | (addr[0] << 0);
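
	/* Example: for addr 00:11:22:33:44:55 this packs
	 * mac_addr_lo = 0x33221100 and mac_addr_hi = 0x5544, i.e. the
	 * first octet of the address lands in the low byte of the low
	 * register.
	 */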
	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	unsigned int pr_mode, am_mode;

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return 0;
}

static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return -EINVAL;

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg &= ~(1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return 0;
}

static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return -EINVAL;

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg |= (1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return 0;
}
static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}

static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				 int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
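	/* Illustrative example: for MMD 1, register 0x0000, mmd_address is
	 * 0x10000, so the window select register is written with 0x100
	 * (mmd_address >> 8) and the data is then read from mmio offset 0
	 * ((mmd_address & 0xff) << 2).
	 */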
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}

static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
				   int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;
	unsigned long flags;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and writing 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);

	case XGBE_XPCS_ACCESS_V2:
	default:
		return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}
static unsigned int xgbe_create_mdio_sca(int port, int reg)
{
	unsigned int mdio_sca, da;

	da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;

	mdio_sca = 0;
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);

	return mdio_sca;
}

static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
				   int reg, u16 val)
{
	unsigned int mdio_sca, mdio_sccd;

	reinit_completion(&pdata->mdio_complete);

	mdio_sca = xgbe_create_mdio_sca(addr, reg);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio write operation timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
				  int reg)
{
	unsigned int mdio_sca, mdio_sccd;

	reinit_completion(&pdata->mdio_complete);

	mdio_sca = xgbe_create_mdio_sca(addr, reg);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
		netdev_err(pdata->netdev, "mdio read operation timed out\n");
		return -ETIMEDOUT;
	}

	return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
				 enum xgbe_mdio_mode mode)
{
	unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

	switch (mode) {
	case XGBE_MDIO_MODE_CL22:
		if (port > XGMAC_MAX_C22_PORT)
			return -EINVAL;
		reg_val |= (1 << port);
		break;
	case XGBE_MDIO_MODE_CL45:
		break;
	default:
		return -EINVAL;
	}

	XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return 0;
}
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}
static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
			       struct xgbe_ring_data *rdata, unsigned int index)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	dma_addr_t hdr_dma, buf_dma;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}

static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xgbe_rx_desc_reset(pdata, rdata, i);
	}

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--rx_desc_init\n");
}
static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
				      unsigned int addend)
{
	unsigned int count = 10000;

	/* Set the addend register value and tell the device */
	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		udelay(5);

	if (!count)
		netdev_err(pdata->netdev,
			   "timed out updating timestamp addend register\n");
}

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
				 unsigned int nsec)
{
	unsigned int count = 10000;

	/* Set the time values and tell the device */
	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		udelay(5);

	if (!count)
		netdev_err(pdata->netdev, "timed out initializing timestamp\n");
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
	u64 nsec;

	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += XGMAC_IOREAD(pdata, MAC_STNR);

	return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
	unsigned int tx_snr, tx_ssr;
	u64 nsec;

	if (pdata->vdata->tx_tstamp_workaround) {
		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
	} else {
		tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
		tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
	}

	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
		return 0;

	nsec = tx_ssr;
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;

	return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
			       struct xgbe_ring_desc *rdesc)
{
	u64 nsec;

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
		nsec = le32_to_cpu(rdesc->desc1);
		nsec <<= 32;
		nsec |= le32_to_cpu(rdesc->desc0);
		if (nsec != 0xffffffffffffffffULL) {
			packet->rx_tstamp = nsec;
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       RX_TSTAMP, 1);
		}
	}
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
			      unsigned int mac_tscr)
{
	/* Set one nano-second accuracy */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
		return 0;

	/* Initialize time registers */
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	xgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));

	return 0;
}
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
			       struct xgbe_ring *ring)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring_data *rdata;

	/* Make sure everything is written before the register write */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor
	 */
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Start the Tx timer */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}

	ring->tx.xmit_more = 0;
}
static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int tx_packets, tx_bytes;
	unsigned int csum, tso, vlan, vxlan;
	unsigned int tso_context, vlan_context;
	unsigned int tx_set_ic;
	int start_index = ring->cur;
	int cur_index = ring->cur;
	int i;

	DBGPR("-->xgbe_dev_xmit\n");

	tx_packets = packet->tx_packets;
	tx_bytes = packet->tx_bytes;

	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      CSUM_ENABLE);
	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);
	vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN);

	if (tso && (packet->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	ring->coalesce_count += tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	rdesc = rdata->rdesc;

	/* Create a context descriptor if this is a TSO packet */
	if (tso_context || vlan_context) {
		if (tso_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "TSO context descriptor, mss=%u\n",
				  packet->mss);

			/* Set the MSS size */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
					  MSS, packet->mss);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Indicate this descriptor contains the MSS */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  TCMSSV, 1);

			ring->tx.cur_mss = packet->mss;
		}

		if (vlan_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "VLAN context descriptor, ctag=%u\n",
				  packet->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Set the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VT, packet->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VLTV, 1);

			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
		}

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;
	}

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

	/* Update the buffer length */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
			  rdata->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
				  TX_NORMAL_DESC2_VLAN_INSERT);

	/* Timestamp enablement check */
	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/* Set OWN bit if not the first descriptor */
	if (cur_index != start_index)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (tso) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
				  packet->tcp_payload_len);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
				  packet->tcp_header_len / 4);

		pdata->ext_stats.tx_tso_packets += tx_packets;
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
				  packet->length);
	}

	if (vxlan) {
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
				  TX_NORMAL_DESC3_VXLAN_PACKET);

		pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
	}

	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;

		/* Update buffer address */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

		/* Update the buffer length */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
				  rdata->skb_dma_len);

		/* Set OWN bit */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

		/* Mark it as NORMAL descriptor */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	/* Save the Tx info to report back during cleanup */
	rdata->tx.packets = tx_packets;
	rdata->tx.bytes = tx_bytes;

	pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
	pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	dma_wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (netif_msg_tx_queued(pdata))
		xgbe_dump_tx_desc(pdata, ring, start_index,
				  packet->rdesc_count, 1);

	/* Make sure ownership is written to the descriptor */
	smp_wmb();

	ring->cur = cur_index + 1;
	if (!packet->skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xgbe_tx_start_xmit(channel, ring);
	else
		ring->tx.xmit_more = 1;

	DBGPR("  %s: descriptors %u to %u written\n",
	      channel->name, start_index & (ring->rdesc_count - 1),
	      (ring->cur - 1) & (ring->rdesc_count - 1));

	DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct net_device *netdev = pdata->netdev;
	unsigned int err, etlt, l34t;

	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		xgbe_dump_rx_desc(pdata, ring, ring->cur);

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* Timestamp Context Descriptor */
		xgbe_get_rx_tstamp(packet, rdesc);

		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 1);

	/* Get the header length */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       FIRST, 1);
		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
						      RX_NORMAL_DESC2, HL);
		if (rdata->rx.hdr_len)
			pdata->ext_stats.rx_split_header_packets++;
	} else {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       FIRST, 0);
	}

	/* Get the RSS hash */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       RSS_HASH, 1);

		packet->rss_hash = le32_to_cpu(rdesc->desc1);

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			packet->rss_hash_type = PKT_HASH_TYPE_L4;
			break;
		default:
			packet->rss_hash_type = PKT_HASH_TYPE_L3;
		}
	}

	/* Not all the data has been transferred for this packet */
	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
		return 0;

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		       LAST, 1);

	/* Get the packet length */
	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	/* Set checksum done indicator as appropriate */
	if (netdev->features & NETIF_F_RXCSUM) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CSUM_DONE, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       TNPCSUM_DONE, 1);
	}

	/* Set the tunneled packet indicator */
	if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       TNP, 1);
		pdata->ext_stats.rx_vxlan_packets++;

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_UNKNOWN:
		case RX_DESC3_L34T_IPV6_UNKNOWN:
			XGMAC_SET_BITS(packet->attributes,
				       RX_PACKET_ATTRIBUTES,
				       TNPCSUM_DONE, 0);
			break;
		}
	}

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			XGMAC_SET_BITS(packet->attributes,
				       RX_PACKET_ATTRIBUTES,
				       VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
							      RX_NORMAL_DESC0,
							      OVT);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  packet->vlan_ctag);
		}
	} else {
		unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
						  RX_PACKET_ATTRIBUTES, TNP);

		if ((etlt == 0x05) || (etlt == 0x06)) {
			XGMAC_SET_BITS(packet->attributes,
				       RX_PACKET_ATTRIBUTES,
				       CSUM_DONE, 0);
			XGMAC_SET_BITS(packet->attributes,
				       RX_PACKET_ATTRIBUTES,
				       TNPCSUM_DONE, 0);
			pdata->ext_stats.rx_csum_errors++;
		} else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
			XGMAC_SET_BITS(packet->attributes,
				       RX_PACKET_ATTRIBUTES,
				       CSUM_DONE, 0);
			XGMAC_SET_BITS(packet->attributes,
				       RX_PACKET_ATTRIBUTES,
				       TNPCSUM_DONE, 0);
			pdata->ext_stats.rx_vxlan_csum_errors++;
		} else {
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
				       FRAME, 1);
		}
	}

	pdata->ext_stats.rxq_packets[channel->queue_index]++;
	pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;

	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
	      ring->cur & (ring->rdesc_count - 1), ring->cur);

	return 0;
}
static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}

static int xgbe_enable_int(struct xgbe_channel *channel,
			   enum xgbe_int int_id)
{
	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->curr_ier |= channel->saved_ier;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

	return 0;
}

static int xgbe_disable_int(struct xgbe_channel *channel,
			    enum xgbe_int int_id)
{
	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->saved_ier = channel->curr_ier;
		channel->curr_ier = 0;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

	return 0;
}
static int __xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	DBGPR("-->xgbe_exit\n");

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	usleep_range(10, 15);

	/* Poll until the software reset bit self-clears */
	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		usleep_range(500, 600);

	if (!count)
		return -EBUSY;

	DBGPR("<--xgbe_exit\n");

	return 0;
}

static int xgbe_exit(struct xgbe_prv_data *pdata)
{
	int ret;

	/* To guard against possible incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __xgbe_exit(pdata);
	if (ret)
		return ret;

	return __xgbe_exit(pdata);
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll until the flush (FTQ) bit self-clears for each queue */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
							MTL_Q_TQOMR, FTQ))
			usleep_range(500, 600);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	unsigned int sbmr;

	sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);

	/* Set enhanced addressing mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);

	XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);

	/* Set descriptor fetching threshold */
	if (pdata->vdata->tx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
				   pdata->vdata->tx_desc_prefetch);

	if (pdata->vdata->rx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
				   pdata->vdata->rx_desc_prefetch);
}

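/* Worked example (hypothetical values): with pdata->blen = 256 the BLEN
 * field is written as 256 >> 2 = 64 (0x40), i.e. the single bit that
 * selects a 256-beat maximum burst; with rd_osr_limit = 8 the RD_OSR_LMT
 * field is written as 7, since the hardware encodes the outstanding
 * request limit as "limit minus one".
 */
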
static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
	XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
				       MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
					      unsigned int queue,
					      unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));

	if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
		/* PFC is active for this queue */
		rfa = pdata->pfc_rfa;
		rfd = rfa + frame_fifo_size;
		if (rfd > XGMAC_FLOW_CONTROL_MAX)
			rfd = XGMAC_FLOW_CONTROL_MAX;
		if (rfa >= XGMAC_FLOW_CONTROL_MAX)
			rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
		goto set_threshold;
	}

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
	if (q_fifo_size <= 2048) {
		/* rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += XGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

set_threshold:
	pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
}

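/* Worked example (hypothetical sizes, assuming XGMAC_FLOW_CONTROL_UNIT is
 * 512 bytes): with a 16384-byte queue fifo and a ~9022-byte max frame that
 * aligns to frame_fifo_size = 9216, the fifo falls between one and three
 * frames, so rfa = 16384 - 9216 = 7168 and rfd = 7168 + 4608 = 11776.
 * XGMAC_FLOW_CONTROL_VALUE() then encodes those byte counts into the units
 * expected by the MTL_Q_RQFCR RFA/RFD fields.
 */
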
static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
						  unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;

		xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
				       pdata->rx_rfa[i]);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
				       pdata->rx_rfd[i]);
	}
}

static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return min_t(unsigned int, pdata->tx_max_fifo_size,
		     pdata->hw_feat.tx_fifo_size);
}

static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return min_t(unsigned int, pdata->rx_max_fifo_size,
		     pdata->hw_feat.rx_fifo_size);
}

static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
				      unsigned int queue_count,
				      unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;
	unsigned int i;

	q_fifo_size = fifo_size / queue_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result by 1).
	 */
	p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	/* Distribute the fifo equally amongst the queues */
	for (i = 0; i < queue_count; i++)
		fifo[i] = p_fifo;
}

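/* Worked example (hypothetical sizes, assuming XGMAC_FIFO_UNIT is 256
 * bytes): a 65536-byte fifo split across 8 queues gives q_fifo_size =
 * 8192, so p_fifo = 8192 / 256 - 1 = 31 is programmed for every queue;
 * the hardware treats a register value of 0 as one allocation unit.
 */
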
static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
					   unsigned int queue_count,
					   unsigned int *fifo)
{
	unsigned int i;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);

	if (queue_count <= IEEE_8021QAZ_MAX_TCS)
		return fifo_size;

	/* Rx queues 9 and up are for specialized packets,
	 * such as PTP or DCB control packets, etc. and
	 * don't require a large fifo
	 */
	for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
		fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
		fifo_size -= XGMAC_FIFO_MIN_ALLOC;
	}

	return fifo_size;
}

static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
{
	unsigned int delay;

	/* If a delay has been provided, use that */
	if (pdata->pfc->delay)
		return pdata->pfc->delay / 8;

	/* Allow for two maximum size frames */
	delay = xgbe_get_max_frame(pdata);
	delay += XGMAC_ETH_PREAMBLE;
	delay *= 2;

	/* Allow for PFC frame */
	delay += XGMAC_PFC_DATA_LEN;
	delay += ETH_HLEN + ETH_FCS_LEN;
	delay += XGMAC_ETH_PREAMBLE;

	/* Allow for miscellaneous delays (LPI exit, cable, etc.) */
	delay += XGMAC_PFC_DELAYS;

	return delay;
}

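/* Worked example: struct ieee_pfc reports the delay allowance in bit
 * times, so a configured delay of 32768 becomes 32768 / 8 = 4096 bytes.
 * When no delay is configured, the fallback above budgets two max-size
 * frames plus one PFC frame plus a fixed miscellaneous allowance.
 */
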
static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
{
	unsigned int count, prio_queues;
	unsigned int i;

	if (!pdata->pfc->pfc_en)
		return 0;

	count = 0;
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	for (i = 0; i < prio_queues; i++) {
		if (!xgbe_is_pfc_queue(pdata, i))
			continue;

		pdata->pfcq[i] = 1;
		count++;
	}

	return count;
}

static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
				    unsigned int fifo_size,
				    unsigned int *fifo)
{
	unsigned int q_fifo_size, rem_fifo, addn_fifo;
	unsigned int prio_queues;
	unsigned int pfc_count;
	unsigned int i;

	q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	pfc_count = xgbe_get_pfc_queues(pdata);

	if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
		/* No traffic classes with PFC enabled or can't do lossless */
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
		return;
	}

	/* Calculate how much fifo we have to play with */
	rem_fifo = fifo_size - (q_fifo_size * prio_queues);

	/* Calculate how much more than base fifo PFC needs, which also
	 * becomes the threshold activation point (RFA)
	 */
	pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
	pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);

	if (pdata->pfc_rfa > q_fifo_size) {
		addn_fifo = pdata->pfc_rfa - q_fifo_size;
		addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
	} else {
		addn_fifo = 0;
	}

	/* Calculate DCB fifo settings:
	 *   - distribute remaining fifo between the VLAN priority
	 *     queues based on traffic class PFC enablement and overall
	 *     priority (0 is lowest priority, so start at highest)
	 */
	i = prio_queues;
	while (i > 0) {
		i--;

		fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;

		if (!pdata->pfcq[i] || !addn_fifo)
			continue;

		if (addn_fifo > rem_fifo) {
			netdev_warn(pdata->netdev,
				    "RXq%u cannot set needed fifo size\n", i);
			if (!rem_fifo)
				continue;

			addn_fifo = rem_fifo;
		}

		fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
		rem_fifo -= addn_fifo;
	}

	if (rem_fifo) {
		unsigned int inc_fifo = rem_fifo / prio_queues;

		/* Distribute remaining fifo across queues */
		for (i = 0; i < prio_queues; i++)
			fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
	}
}

static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int i;

	fifo_size = xgbe_get_tx_fifo_size(pdata);

	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);

	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int prio_queues;
	unsigned int i;

	/* Clear any DCB related fifo/queue information */
	memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
	pdata->pfc_rfa = 0;

	fifo_size = xgbe_get_rx_fifo_size(pdata);
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);

	/* Assign a minimum fifo to the non-VLAN priority queues */
	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);

	if (pdata->pfc && pdata->ets)
		xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
	else
		xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);

	xgbe_calculate_flow_control_threshold(pdata, fifo);
	xgbe_config_flow_control_threshold(pdata);

	if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
		netif_info(pdata, drv, pdata->netdev,
			   "%u Rx hardware queues\n", pdata->rx_q_count);
		for (i = 0; i < pdata->rx_q_count; i++)
			netif_info(pdata, drv, pdata->netdev,
				   "RxQ%u, %u byte fifo queue\n", i,
				   ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
	} else {
		netif_info(pdata, drv, pdata->netdev,
			   "%u Rx hardware queues, %u byte fifo per queue\n",
			   pdata->rx_q_count,
			   ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
	}
}

static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		if (i < qptc_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}

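/* Worked example (hypothetical counts, assuming MAC_RQC2_Q_PER_REG is 4):
 * with 8 priority queues, ppq = 1 so priority N maps to RXqN; each queue's
 * 8-bit priority mask occupies byte (N % 4) of a MAC_RQC2-series register,
 * and a full register is flushed out every fourth queue. The 0x80 written
 * per queue byte in the MTL_RQDCM series sets the bit selecting dynamic
 * (per-packet) DMA channel mapping for that queue.
 */
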
static void xgbe_config_tc(struct xgbe_prv_data *pdata)
{
	unsigned int offset, queue, prio;
	u8 i;

	netdev_reset_tc(pdata->netdev);
	if (!pdata->num_tcs)
		return;

	netdev_set_num_tc(pdata->netdev, pdata->num_tcs);

	for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
		while ((queue < pdata->tx_q_count) &&
		       (pdata->q2tc_map[queue] == i))
			queue++;

		netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
			  i, offset, queue - 1);
		netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
		offset = queue;
	}

	if (!pdata->ets)
		return;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		netdev_set_prio_tc_map(pdata->netdev, prio,
				       pdata->ets->prio_tc[prio]);
}

static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
	struct ieee_ets *ets = pdata->ets;
	unsigned int total_weight, min_weight, weight;
	unsigned int mask, reg, reg_val;
	unsigned int i, prio;

	if (!ets)
		return;

	/* Set Tx to deficit weighted round robin scheduling algorithm (when
	 * traffic class is using ETS algorithm)
	 */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

	/* Set Traffic Class algorithms */
	total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
	min_weight = total_weight / 100;
	if (!min_weight)
		min_weight = 1;

	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		/* Map the priorities to the traffic class */
		mask = 0;
		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
			if (ets->prio_tc[prio] == i)
				mask |= (1 << prio);
		}

		netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
			  i, mask);
		reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
		reg_val = XGMAC_IOREAD(pdata, reg);

		reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
		reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));

		XGMAC_IOWRITE(pdata, reg, reg_val);

		/* Set the traffic class algorithm */
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using SP\n", i);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_SP);
			break;
		case IEEE_8021QAZ_TSA_ETS:
			weight = total_weight * ets->tc_tx_bw[i] / 100;
			weight = clamp(weight, min_weight, total_weight);

			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using DWRR (weight %u)\n", i, weight);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_ETS);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
					       weight);
			break;
		}
	}

	xgbe_config_tc(pdata);
}

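/* Worked example (hypothetical values): with an MTU of 1500 and 8 traffic
 * classes, total_weight = 1500 * 8 = 12000 and min_weight = 120; a class
 * granted 25% bandwidth gets weight = 12000 * 25 / 100 = 3000, clamped
 * into [min_weight, total_weight] before being written to MTL_TC_QWR.
 */
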
static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
	if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
		/* Just stop the Tx queues while Rx fifo is changed */
		netif_tx_stop_all_queues(pdata->netdev);

		/* Suspend Rx so that fifos can be adjusted */
		pdata->hw_if.disable_rx(pdata);
	}

	xgbe_config_rx_fifo_size(pdata);
	xgbe_config_flow_control(pdata);

	if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
		/* Resume Rx */
		pdata->hw_if.enable_rx(pdata);

		/* Resume Tx queues */
		netif_tx_start_all_queues(pdata->netdev);
	}
}

static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}

static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
	unsigned int val;

	val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	xgbe_set_speed(pdata, pdata->phy_speed);
}

static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if (pdata->netdev->features & NETIF_F_RXCSUM)
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}

static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xgbe_enable_rx_vlan_filtering(pdata);
	else
		xgbe_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xgbe_enable_rx_vlan_stripping(pdata);
	else
		xgbe_disable_rx_vlan_stripping(pdata);
}

static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	if (pdata->vdata->mmc_64bit) {
		switch (reg_lo) {
		/* These registers are always 32 bit */
		case MMC_RXRUNTERROR:
		case MMC_RXJABBERERROR:
		case MMC_RXUNDERSIZE_G:
		case MMC_RXOVERSIZE_G:
		case MMC_RXWATCHDOGERROR:
			read_hi = false;
			break;
		default:
			read_hi = true;
		}
	} else {
		switch (reg_lo) {
		/* These registers are always 64 bit */
		case MMC_TXOCTETCOUNT_GB_LO:
		case MMC_TXOCTETCOUNT_G_LO:
		case MMC_RXOCTETCOUNT_GB_LO:
		case MMC_RXOCTETCOUNT_G_LO:
			read_hi = true;
			break;
		default:
			read_hi = false;
		}
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return val;
}

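/* Note: for a 64-bit counter the high word lives in the register at
 * reg_lo + 4, so the value is assembled as lo | ((u64)hi << 32). The
 * 32-bit-only counters (runt, jabber, undersize, oversize, watchdog)
 * must never trigger the high-word read, or an unrelated neighboring
 * register would be folded into the result.
 */
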
static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
			xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}

static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

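/* Note: because MMC_CR.ROR makes every counter reset on read, the
 * "stats->x += xgbe_mmc_read(...)" accumulation in the interrupt handlers
 * and in xgbe_read_mmc_stats() is lossless: each read returns only the
 * delta since the previous read, and freezing via MMC_CR.MCF keeps the
 * snapshot consistent while it is being taken.
 */
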
static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
				     unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
	 * wait forever though...
	 */
	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx queue %u to empty\n",
			    queue);
}

static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
				 unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return xgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
}

static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
				       MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
				 unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
	 * wait forever though...
	 */
	rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, rx_timeout)) {
		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, rx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}

static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

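/* Worked example: each queue owns a 2-bit RXQEN field in MAC_RQC0R, and
 * the 0x02 value programs it to "enabled" (DCB/generic traffic), so with
 * 4 Rx queues reg_val = 0x02 | 0x08 | 0x20 | 0x80 = 0xaa.
 */
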
static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xgbe_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}
}

static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

static int xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_init\n");

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret) {
		netdev_err(pdata->netdev, "error flushing TX queues\n");
		return ret;
	}

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	/* TODO: Error Packet and undersized good Packet forwarding enable
	 * (FEP and FUP)
	 */
	xgbe_config_dcb_tc(pdata);
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_rx_mode(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	/*
	 * Initialize ECC related features
	 */
	xgbe_enable_ecc_interrupts(pdata);

	DBGPR("<--xgbe_init\n");

	return 0;
}

void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	DBGPR("-->xgbe_init_function_ptrs\n");

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_mac_address = xgbe_set_mac_address;
	hw_if->config_rx_mode = xgbe_config_rx_mode;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_speed = xgbe_set_speed;

	hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;

	hw_if->set_gpio = xgbe_set_gpio;
	hw_if->clr_gpio = xgbe_clr_gpio;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_xmit = xgbe_dev_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related Sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;
	hw_if->tx_start_xmit = xgbe_tx_start_xmit;

	/* For flow control */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For PTP config */
	hw_if->config_tstamp = xgbe_config_tstamp;
	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

	/* For Data Center Bridging config */
	hw_if->config_tc = xgbe_config_tc;
	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

	/* For ECC */
	hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
	hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;

	/* For VXLAN */
	hw_if->enable_vxlan = xgbe_enable_vxlan;
	hw_if->disable_vxlan = xgbe_disable_vxlan;
	hw_if->set_vxlan_id = xgbe_set_vxlan_id;

	DBGPR("<--xgbe_init_function_ptrs\n");
}