1 /*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
5 Copyright(C) 2007-2011 STMicroelectronics Ltd
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
23 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25 Documentation available at:
26 http://www.stlinux.com
28 https://bugzilla.stlinux.com/
29 *******************************************************************************/
31 #include <linux/clk.h>
32 #include <linux/kernel.h>
33 #include <linux/interrupt.h>
35 #include <linux/tcp.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/if_ether.h>
39 #include <linux/crc32.h>
40 #include <linux/mii.h>
42 #include <linux/if_vlan.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h>
45 #include <linux/prefetch.h>
46 #include <linux/pinctrl/consumer.h>
47 #ifdef CONFIG_DEBUG_FS
48 #include <linux/debugfs.h>
49 #include <linux/seq_file.h>
50 #endif /* CONFIG_DEBUG_FS */
51 #include <linux/net_tstamp.h>
52 #include "stmmac_ptp.h"
54 #include <linux/reset.h>
55 #include <linux/of_mdio.h>
57 #define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
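/* Worked example (assuming a 64-byte SMP_CACHE_BYTES): STMMAC_ALIGN(100)
 * rounds up to the next cache-line multiple, i.e. (100 + 63) & ~63 = 128.
 */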
59 /* Module parameters */
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, S_IRUGO | S_IWUSR);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65 static int debug = -1;
66 module_param(debug, int, S_IRUGO | S_IWUSR);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69 static int phyaddr = -1;
70 module_param(phyaddr, int, S_IRUGO);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
73 #define DMA_TX_SIZE 256
74 static int dma_txsize = DMA_TX_SIZE;
75 module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
78 #define DMA_RX_SIZE 256
79 static int dma_rxsize = DMA_RX_SIZE;
80 module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
81 MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
83 static int flow_ctrl = FLOW_OFF;
84 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
87 static int pause = PAUSE_TIME;
88 module_param(pause, int, S_IRUGO | S_IWUSR);
89 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
92 static int tc = TC_DEFAULT;
93 module_param(tc, int, S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(tc, "DMA threshold control value");
96 #define DEFAULT_BUFSIZE 1536
97 static int buf_sz = DEFAULT_BUFSIZE;
98 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
99 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
101 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
102 NETIF_MSG_LINK | NETIF_MSG_IFUP |
103 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
105 #define STMMAC_DEFAULT_LPI_TIMER 1000
106 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
107 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
108 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
109 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
111 /* By default the driver will use the ring mode to manage tx and rx descriptors,
112 * but the user can pass this parameter to force the chain mode instead of the ring
114 static unsigned int chain_mode;
115 module_param(chain_mode, int, S_IRUGO);
116 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
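/* A hypothetical load-time example (the exact module name depends on how the
 * driver is built):
 *	modprobe stmmac chain_mode=1 buf_sz=4096
 * forces the chain mode and a 4 KiB DMA buffer size.
 */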
118 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
120 #ifdef CONFIG_DEBUG_FS
121 static int stmmac_init_fs(struct net_device *dev);
122 static void stmmac_exit_fs(struct net_device *dev);
125 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
128 * stmmac_verify_args - verify the driver parameters.
129 * Description: it checks the driver parameters and sets a default in case of
132 static void stmmac_verify_args(void)
134 if (unlikely(watchdog < 0))
136 if (unlikely(dma_rxsize < 0))
137 dma_rxsize = DMA_RX_SIZE;
138 if (unlikely(dma_txsize < 0))
139 dma_txsize = DMA_TX_SIZE;
140 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
141 buf_sz = DEFAULT_BUFSIZE;
142 if (unlikely(flow_ctrl > 1))
143 flow_ctrl = FLOW_AUTO;
144 else if (likely(flow_ctrl < 0))
145 flow_ctrl = FLOW_OFF;
146 if (unlikely((pause < 0) || (pause > 0xffff)))
149 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
153 * stmmac_clk_csr_set - dynamically set the MDC clock
154 * @priv: driver private structure
155 * Description: this is to dynamically set the MDC clock according to the csr
158 * If a specific clk_csr value is passed from the platform
159 * this means that the CSR Clock Range selection cannot be
160 * changed at run-time and it is fixed (as reported in the driver
161 * documentation). Otherwise the driver will try to set the MDC
162 * clock dynamically according to the actual clock input.
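 * For example, a 125 MHz csr input clock falls in the 100-150 MHz range,
 * so clk_csr is set to STMMAC_CSR_100_150M.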
164 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
168 clk_rate = clk_get_rate(priv->stmmac_clk);
170 /* The platform-provided default clk_csr is assumed valid
171 * for all cases except the ones mentioned below.
172 * For values higher than the IEEE 802.3 specified frequency
173 * we cannot estimate the proper divider because the frequency
174 * of clk_csr_i is not known. So we do not change the default
177 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
178 if (clk_rate < CSR_F_35M)
179 priv->clk_csr = STMMAC_CSR_20_35M;
180 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
181 priv->clk_csr = STMMAC_CSR_35_60M;
182 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
183 priv->clk_csr = STMMAC_CSR_60_100M;
184 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
185 priv->clk_csr = STMMAC_CSR_100_150M;
186 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
187 priv->clk_csr = STMMAC_CSR_150_250M;
188 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
189 priv->clk_csr = STMMAC_CSR_250_300M;
193 static void print_pkt(unsigned char *buf, int len)
195 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
196 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
202 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
204 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
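/* Worked example: with dma_tx_size = 256, dirty_tx = 10 and cur_tx = 250,
 * 10 + 256 - 250 - 1 = 15 descriptors are still free, below the
 * STMMAC_TX_THRESH wake-up threshold of 256/4 = 64.
 */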
208 * stmmac_hw_fix_mac_speed - callback for speed selection
209 * @priv: driver private structure
210 * Description: on some platforms (e.g. ST), some HW system configuration
211 * registers have to be set according to the link speed negotiated.
213 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
215 struct phy_device *phydev = priv->phydev;
217 if (likely(priv->plat->fix_mac_speed))
218 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
222 * stmmac_enable_eee_mode - check and enter LPI mode
223 * @priv: driver private structure
224 * Description: this function is to verify and enter LPI mode in case of
227 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
229 /* Check and enter in LPI mode */
230 if ((priv->dirty_tx == priv->cur_tx) &&
231 (priv->tx_path_in_lpi_mode == false))
232 priv->hw->mac->set_eee_mode(priv->hw);
236 * stmmac_disable_eee_mode - disable and exit from LPI mode
237 * @priv: driver private structure
238 * Description: this function is to exit and disable EEE when the
239 * LPI state is true. It is called by the xmit path.
241 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
243 priv->hw->mac->reset_eee_mode(priv->hw);
244 del_timer_sync(&priv->eee_ctrl_timer);
245 priv->tx_path_in_lpi_mode = false;
249 * stmmac_eee_ctrl_timer - EEE TX SW timer.
252 * if there is no data transfer and if we are not in LPI state,
253 * then the MAC transmitter can be moved to the LPI state.
255 static void stmmac_eee_ctrl_timer(unsigned long arg)
257 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
259 stmmac_enable_eee_mode(priv);
260 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
264 * stmmac_eee_init - init EEE
265 * @priv: driver private structure
267 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
268 * can also manage EEE, this function enables the LPI state and starts the related timers.
271 bool stmmac_eee_init(struct stmmac_priv *priv)
273 char *phy_bus_name = priv->plat->phy_bus_name;
275 int interface = priv->plat->interface;
278 if ((interface != PHY_INTERFACE_MODE_MII) &&
279 (interface != PHY_INTERFACE_MODE_GMII) &&
280 !phy_interface_mode_is_rgmii(interface))
283 /* Using the PCS we cannot deal with the phy registers at this stage,
284 * so we do not support extra features like EEE.
286 if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
287 (priv->pcs == STMMAC_PCS_RTBI))
290 /* Never init EEE if a switch is attached */
291 if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
294 /* MAC core supports the EEE feature. */
295 if (priv->dma_cap.eee) {
296 int tx_lpi_timer = priv->tx_lpi_timer;
298 /* Check if the PHY supports EEE */
299 if (phy_init_eee(priv->phydev, 1)) {
300 /* To manage at run-time if the EEE cannot be supported
301 * anymore (for example because the lp caps have been changed).
303 * In that case the driver disables its own timers.
305 spin_lock_irqsave(&priv->lock, flags);
306 if (priv->eee_active) {
307 pr_debug("stmmac: disable EEE\n");
308 del_timer_sync(&priv->eee_ctrl_timer);
309 priv->hw->mac->set_eee_timer(priv->hw, 0,
312 priv->eee_active = 0;
313 spin_unlock_irqrestore(&priv->lock, flags);
316 /* Activate the EEE and start timers */
317 spin_lock_irqsave(&priv->lock, flags);
318 if (!priv->eee_active) {
319 priv->eee_active = 1;
320 setup_timer(&priv->eee_ctrl_timer,
321 stmmac_eee_ctrl_timer,
322 (unsigned long)priv);
323 mod_timer(&priv->eee_ctrl_timer,
324 STMMAC_LPI_T(eee_timer));
326 priv->hw->mac->set_eee_timer(priv->hw,
327 STMMAC_DEFAULT_LIT_LS,
330 /* Set HW EEE according to the speed */
331 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
334 spin_unlock_irqrestore(&priv->lock, flags);
336 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
342 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
343 * @priv: driver private structure
344 * @entry : descriptor index to be used.
345 * @skb : the socket buffer
347 * This function will read the timestamp from the descriptor and pass it to
348 * the stack. It also performs some sanity checks.
350 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
351 unsigned int entry, struct sk_buff *skb)
353 struct skb_shared_hwtstamps shhwtstamp;
357 if (!priv->hwts_tx_en)
360 /* exit if skb doesn't support hw tstamp */
361 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
365 desc = (priv->dma_etx + entry);
367 desc = (priv->dma_tx + entry);
369 /* check tx tstamp status */
370 if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
373 /* get the valid tstamp */
374 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
376 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
377 shhwtstamp.hwtstamp = ns_to_ktime(ns);
378 /* pass tstamp to stack */
379 skb_tstamp_tx(skb, &shhwtstamp);
384 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
385 * @priv: driver private structure
386 * @entry : descriptor index to be used.
387 * @skb : the socket buffer
389 * This function will read the received packet's timestamp from the descriptor
390 * and pass it to the stack. It also performs some sanity checks.
392 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
393 unsigned int entry, struct sk_buff *skb)
395 struct skb_shared_hwtstamps *shhwtstamp = NULL;
399 if (!priv->hwts_rx_en)
403 desc = (priv->dma_erx + entry);
405 desc = (priv->dma_rx + entry);
407 /* exit if rx tstamp is not valid */
408 if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
411 /* get valid tstamp */
412 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
413 shhwtstamp = skb_hwtstamps(skb);
414 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
415 shhwtstamp->hwtstamp = ns_to_ktime(ns);
419 * stmmac_hwtstamp_ioctl - control hardware timestamping.
420 * @dev: device pointer.
421 * @ifr: An IOCTL-specific structure that can contain a pointer to
422 * a proprietary structure used to pass information to the driver.
424 * This function configures the MAC to enable/disable both outgoing (TX)
425 * and incoming (RX) packet timestamping based on user input.
427 * 0 on success and an appropriate -ve integer on failure.
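 *
 * A minimal user-space sketch of the standard SIOCSHWTSTAMP usage (the
 * socket fd and interface name are only illustrative):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);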
429 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
431 struct stmmac_priv *priv = netdev_priv(dev);
432 struct hwtstamp_config config;
433 struct timespec64 now;
437 u32 ptp_over_ipv4_udp = 0;
438 u32 ptp_over_ipv6_udp = 0;
439 u32 ptp_over_ethernet = 0;
440 u32 snap_type_sel = 0;
441 u32 ts_master_en = 0;
445 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
446 netdev_alert(priv->dev, "No support for HW time stamping\n");
447 priv->hwts_tx_en = 0;
448 priv->hwts_rx_en = 0;
453 if (copy_from_user(&config, ifr->ifr_data,
454 sizeof(struct hwtstamp_config)))
457 pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
458 __func__, config.flags, config.tx_type, config.rx_filter);
460 /* reserved for future extensions */
464 if (config.tx_type != HWTSTAMP_TX_OFF &&
465 config.tx_type != HWTSTAMP_TX_ON)
469 switch (config.rx_filter) {
470 case HWTSTAMP_FILTER_NONE:
471 /* time stamp no incoming packet at all */
472 config.rx_filter = HWTSTAMP_FILTER_NONE;
475 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
476 /* PTP v1, UDP, any kind of event packet */
477 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
478 /* take time stamp for all event messages */
479 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
481 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
482 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
485 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
486 /* PTP v1, UDP, Sync packet */
487 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
488 /* take time stamp for SYNC messages only */
489 ts_event_en = PTP_TCR_TSEVNTENA;
491 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
492 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
495 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
496 /* PTP v1, UDP, Delay_req packet */
497 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
498 /* take time stamp for Delay_Req messages only */
499 ts_master_en = PTP_TCR_TSMSTRENA;
500 ts_event_en = PTP_TCR_TSEVNTENA;
502 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
503 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
506 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
507 /* PTP v2, UDP, any kind of event packet */
508 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
509 ptp_v2 = PTP_TCR_TSVER2ENA;
510 /* take time stamp for all event messages */
511 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
513 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
514 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
517 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
518 /* PTP v2, UDP, Sync packet */
519 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
520 ptp_v2 = PTP_TCR_TSVER2ENA;
521 /* take time stamp for SYNC messages only */
522 ts_event_en = PTP_TCR_TSEVNTENA;
524 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
525 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
528 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
529 /* PTP v2, UDP, Delay_req packet */
530 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
531 ptp_v2 = PTP_TCR_TSVER2ENA;
532 /* take time stamp for Delay_Req messages only */
533 ts_master_en = PTP_TCR_TSMSTRENA;
534 ts_event_en = PTP_TCR_TSEVNTENA;
536 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
537 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
540 case HWTSTAMP_FILTER_PTP_V2_EVENT:
541 /* PTP v2/802.1AS, any layer, any kind of event packet */
542 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
543 ptp_v2 = PTP_TCR_TSVER2ENA;
544 /* take time stamp for all event messages */
545 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
547 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
548 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
549 ptp_over_ethernet = PTP_TCR_TSIPENA;
552 case HWTSTAMP_FILTER_PTP_V2_SYNC:
553 /* PTP v2/802.1AS, any layer, Sync packet */
554 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
555 ptp_v2 = PTP_TCR_TSVER2ENA;
556 /* take time stamp for SYNC messages only */
557 ts_event_en = PTP_TCR_TSEVNTENA;
559 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
560 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
561 ptp_over_ethernet = PTP_TCR_TSIPENA;
564 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
565 /* PTP v2/802.1AS, any layer, Delay_req packet */
566 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
567 ptp_v2 = PTP_TCR_TSVER2ENA;
568 /* take time stamp for Delay_Req messages only */
569 ts_master_en = PTP_TCR_TSMSTRENA;
570 ts_event_en = PTP_TCR_TSEVNTENA;
572 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574 ptp_over_ethernet = PTP_TCR_TSIPENA;
577 case HWTSTAMP_FILTER_ALL:
578 /* time stamp any incoming packet */
579 config.rx_filter = HWTSTAMP_FILTER_ALL;
580 tstamp_all = PTP_TCR_TSENALL;
587 switch (config.rx_filter) {
588 case HWTSTAMP_FILTER_NONE:
589 config.rx_filter = HWTSTAMP_FILTER_NONE;
592 /* PTP v1, UDP, any kind of event packet */
593 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
597 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
598 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
600 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
601 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
603 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
604 tstamp_all | ptp_v2 | ptp_over_ethernet |
605 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
606 ts_master_en | snap_type_sel);
608 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
610 /* program Sub Second Increment reg */
611 priv->hw->ptp->config_sub_second_increment(priv->ioaddr);
613 /* calculate the default addend value:
615 * addend = (2^32)/freq_div_ratio;
616 * where, freq_div_ratio = clk_ptp_ref_i/50MHz
617 * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
618 * NOTE: clk_ptp_ref_i should be >= 50MHz to
619 * achieve 20ns accuracy.
621 * 2^x * y == (y << x), hence
622 * 2^32 * 50000000 ==> (50000000 << 32)
624 temp = (u64) (50000000ULL << 32);
625 priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
626 priv->hw->ptp->config_addend(priv->ioaddr,
627 priv->default_addend);
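		/* For example, with a 62.5 MHz clk_ptp_ref_i the ratio is
		 * 50/62.5 = 0.8, so the computed addend is roughly
		 * 0.8 * 2^32 = 0xCCCCCCCC.
		 */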
629 /* initialize system time */
630 ktime_get_real_ts64(&now);
632 /* lower 32 bits of tv_sec are safe until y2106 */
633 priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
637 return copy_to_user(ifr->ifr_data, &config,
638 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
642 * stmmac_init_ptp - init PTP
643 * @priv: driver private structure
644 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
645 * This is done by looking at the HW cap. register.
646 * This function also registers the ptp driver.
648 static int stmmac_init_ptp(struct stmmac_priv *priv)
650 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
653 /* Fall back to the main clock if no PTP ref clock is passed */
654 priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
655 if (IS_ERR(priv->clk_ptp_ref)) {
656 priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
657 priv->clk_ptp_ref = NULL;
659 clk_prepare_enable(priv->clk_ptp_ref);
660 priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
664 if (priv->dma_cap.atime_stamp && priv->extend_desc)
667 if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
668 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
670 if (netif_msg_hw(priv) && priv->adv_ts)
671 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
673 priv->hw->ptp = &stmmac_ptp;
674 priv->hwts_tx_en = 0;
675 priv->hwts_rx_en = 0;
677 return stmmac_ptp_register(priv);
680 static void stmmac_release_ptp(struct stmmac_priv *priv)
682 if (priv->clk_ptp_ref)
683 clk_disable_unprepare(priv->clk_ptp_ref);
684 stmmac_ptp_unregister(priv);
688 * stmmac_adjust_link - adjusts the link parameters
689 * @dev: net device structure
690 * Description: this is the helper called by the physical abstraction layer
691 * drivers to communicate the phy link status. According to the speed and
692 * duplex, this driver can invoke the registered glue-logic as well.
693 * It also invokes the EEE initialization because the link can switch
694 * between different (EEE capable) networks.
696 static void stmmac_adjust_link(struct net_device *dev)
698 struct stmmac_priv *priv = netdev_priv(dev);
699 struct phy_device *phydev = priv->phydev;
702 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
707 spin_lock_irqsave(&priv->lock, flags);
710 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
712 /* Now we make sure that we can be in full duplex mode.
713 * If not, we operate in half-duplex mode. */
714 if (phydev->duplex != priv->oldduplex) {
716 if (!(phydev->duplex))
717 ctrl &= ~priv->hw->link.duplex;
719 ctrl |= priv->hw->link.duplex;
720 priv->oldduplex = phydev->duplex;
722 /* Flow Control operation */
724 priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
727 if (phydev->speed != priv->speed) {
729 switch (phydev->speed) {
731 if (likely(priv->plat->has_gmac))
732 ctrl &= ~priv->hw->link.port;
733 stmmac_hw_fix_mac_speed(priv);
737 if (priv->plat->has_gmac) {
738 ctrl |= priv->hw->link.port;
739 if (phydev->speed == SPEED_100) {
740 ctrl |= priv->hw->link.speed;
742 ctrl &= ~(priv->hw->link.speed);
745 ctrl &= ~priv->hw->link.port;
747 stmmac_hw_fix_mac_speed(priv);
750 if (netif_msg_link(priv))
751 pr_warn("%s: Speed (%d) not 10/100\n",
752 dev->name, phydev->speed);
756 priv->speed = phydev->speed;
759 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
761 if (!priv->oldlink) {
765 } else if (priv->oldlink) {
769 priv->oldduplex = -1;
772 if (new_state && netif_msg_link(priv))
773 phy_print_status(phydev);
775 spin_unlock_irqrestore(&priv->lock, flags);
777 /* At this stage, it may be necessary to set up the EEE or adjust some
778 * MAC related HW registers.
780 priv->eee_enabled = stmmac_eee_init(priv);
784 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
785 * @priv: driver private structure
786 * Description: this is to verify if the HW supports the Physical Coding
787 * Sublayer (PCS) interface, which can be used when the MAC is
788 * configured for the TBI, RTBI, or SGMII PHY interface.
790 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
792 int interface = priv->plat->interface;
794 if (priv->dma_cap.pcs) {
795 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
796 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
797 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
798 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
799 pr_debug("STMMAC: PCS RGMII support enable\n");
800 priv->pcs = STMMAC_PCS_RGMII;
801 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
802 pr_debug("STMMAC: PCS SGMII support enable\n");
803 priv->pcs = STMMAC_PCS_SGMII;
809 * stmmac_init_phy - PHY initialization
810 * @dev: net device structure
811 * Description: it initializes the driver's PHY state, and attaches the PHY
816 static int stmmac_init_phy(struct net_device *dev)
818 struct stmmac_priv *priv = netdev_priv(dev);
819 struct phy_device *phydev;
820 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
821 char bus_id[MII_BUS_ID_SIZE];
822 int interface = priv->plat->interface;
823 int max_speed = priv->plat->max_speed;
826 priv->oldduplex = -1;
828 if (priv->plat->phy_node) {
829 phydev = of_phy_connect(dev, priv->plat->phy_node,
830 &stmmac_adjust_link, 0, interface);
832 if (priv->plat->phy_bus_name)
833 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
834 priv->plat->phy_bus_name, priv->plat->bus_id);
836 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
839 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
840 priv->plat->phy_addr);
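	/* e.g. a bus_id of "stmmac-0" and PHY address 1 produce the string
	 * "stmmac-0:01", since PHY_ID_FMT is "%s:%02x".
	 */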
841 pr_debug("stmmac_init_phy: trying to attach to %s\n",
844 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
848 if (IS_ERR_OR_NULL(phydev)) {
849 pr_err("%s: Could not attach to PHY\n", dev->name);
853 return PTR_ERR(phydev);
856 /* Stop Advertising 1000BASE Capability if interface is not GMII */
857 if ((interface == PHY_INTERFACE_MODE_MII) ||
858 (interface == PHY_INTERFACE_MODE_RMII) ||
859 (max_speed < 1000 && max_speed > 0))
860 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
861 SUPPORTED_1000baseT_Full);
864 * Broken HW is sometimes missing the pull-up resistor on the
865 * MDIO line, which results in reads to non-existent devices returning
866 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
868 * Note: phydev->phy_id is the result of reading the UID PHY registers.
870 if (!priv->plat->phy_node && phydev->phy_id == 0) {
871 phy_disconnect(phydev);
874 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
875 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
877 priv->phydev = phydev;
883 * stmmac_display_ring - display ring
884 * @head: pointer to the head of the ring passed.
885 * @size: size of the ring.
886 * @extend_desc: to verify if extended descriptors are used.
887 * Description: display the control/status and buffer descriptors.
889 static void stmmac_display_ring(void *head, int size, int extend_desc)
892 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
893 struct dma_desc *p = (struct dma_desc *)head;
895 for (i = 0; i < size; i++) {
899 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
900 i, (unsigned int)virt_to_phys(ep),
901 (unsigned int)x, (unsigned int)(x >> 32),
902 ep->basic.des2, ep->basic.des3);
906 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
907 i, (unsigned int)virt_to_phys(p),
908 (unsigned int)x, (unsigned int)(x >> 32),
916 static void stmmac_display_rings(struct stmmac_priv *priv)
918 unsigned int txsize = priv->dma_tx_size;
919 unsigned int rxsize = priv->dma_rx_size;
921 if (priv->extend_desc) {
922 pr_info("Extended RX descriptor ring:\n");
923 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
924 pr_info("Extended TX descriptor ring:\n");
925 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
927 pr_info("RX descriptor ring:\n");
928 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
929 pr_info("TX descriptor ring:\n");
930 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
934 static int stmmac_set_bfsize(int mtu, int bufsize)
938 if (mtu >= BUF_SIZE_8KiB)
939 ret = BUF_SIZE_16KiB;
940 else if (mtu >= BUF_SIZE_4KiB)
942 else if (mtu >= BUF_SIZE_2KiB)
944 else if (mtu > DEFAULT_BUFSIZE)
947 ret = DEFAULT_BUFSIZE;
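/* Worked example: a 9000-byte jumbo MTU (>= BUF_SIZE_8KiB) selects
 * BUF_SIZE_16KiB, while the default 1500-byte MTU keeps DEFAULT_BUFSIZE
 * (1536 bytes).
 */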
953 * stmmac_clear_descriptors - clear descriptors
954 * @priv: driver private structure
955 * Description: this function is called to clear the tx and rx descriptors
956 * in case of both basic and extended descriptors are used.
958 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
961 unsigned int txsize = priv->dma_tx_size;
962 unsigned int rxsize = priv->dma_rx_size;
964 /* Clear the Rx/Tx descriptors */
965 for (i = 0; i < rxsize; i++)
966 if (priv->extend_desc)
967 priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
968 priv->use_riwt, priv->mode,
969 (i == rxsize - 1), priv->dma_buf_sz);
971 priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
972 priv->use_riwt, priv->mode,
973 (i == rxsize - 1), priv->dma_buf_sz);
974 for (i = 0; i < txsize; i++)
975 if (priv->extend_desc)
976 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
980 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
986 * stmmac_init_rx_buffers - init the RX descriptor buffer.
987 * @priv: driver private structure
988 * @p: descriptor pointer
989 * @i: descriptor index
991 * Description: this function is called to allocate a receive buffer, perform
992 * the DMA mapping and init the descriptor.
994 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
999 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1001 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
1004 priv->rx_skbuff[i] = skb;
1005 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1008 if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
1009 pr_err("%s: DMA mapping error\n", __func__);
1010 dev_kfree_skb_any(skb);
1014 p->des2 = priv->rx_skbuff_dma[i];
1016 if ((priv->hw->mode->init_desc3) &&
1017 (priv->dma_buf_sz == BUF_SIZE_16KiB))
1018 priv->hw->mode->init_desc3(p);
1023 static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
1025 if (priv->rx_skbuff[i]) {
1026 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
1027 priv->dma_buf_sz, DMA_FROM_DEVICE);
1028 dev_kfree_skb_any(priv->rx_skbuff[i]);
1030 priv->rx_skbuff[i] = NULL;
1034 * init_dma_desc_rings - init the RX/TX descriptor rings
1035 * @dev: net device structure
1037 * Description: this function initializes the DMA RX/TX descriptors
1038 * and allocates the socket buffers. It supports the chained and ring
1041 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1044 struct stmmac_priv *priv = netdev_priv(dev);
1045 unsigned int txsize = priv->dma_tx_size;
1046 unsigned int rxsize = priv->dma_rx_size;
1047 unsigned int bfsize = 0;
1050 if (priv->hw->mode->set_16kib_bfsize)
1051 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1053 if (bfsize < BUF_SIZE_16KiB)
1054 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1056 priv->dma_buf_sz = bfsize;
1058 if (netif_msg_probe(priv))
1059 pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
1060 txsize, rxsize, bfsize);
1062 if (netif_msg_probe(priv)) {
1063 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1064 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
1066 /* RX INITIALIZATION */
1067 pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
1069 for (i = 0; i < rxsize; i++) {
1071 if (priv->extend_desc)
1072 p = &((priv->dma_erx + i)->basic);
1074 p = priv->dma_rx + i;
1076 ret = stmmac_init_rx_buffers(priv, p, i, flags);
1078 goto err_init_rx_buffers;
1080 if (netif_msg_probe(priv))
1081 pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
1082 priv->rx_skbuff[i]->data,
1083 (unsigned int)priv->rx_skbuff_dma[i]);
1086 priv->dirty_rx = (unsigned int)(i - rxsize);
1089 /* Setup the chained descriptor addresses */
1090 if (priv->mode == STMMAC_CHAIN_MODE) {
1091 if (priv->extend_desc) {
1092 priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
1094 priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
1097 priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
1099 priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
1104 /* TX INITIALIZATION */
1105 for (i = 0; i < txsize; i++) {
1107 if (priv->extend_desc)
1108 p = &((priv->dma_etx + i)->basic);
1110 p = priv->dma_tx + i;
1112 priv->tx_skbuff_dma[i].buf = 0;
1113 priv->tx_skbuff_dma[i].map_as_page = false;
1114 priv->tx_skbuff[i] = NULL;
1119 netdev_reset_queue(priv->dev);
1121 stmmac_clear_descriptors(priv);
1123 if (netif_msg_hw(priv))
1124 stmmac_display_rings(priv);
1127 err_init_rx_buffers:
1129 stmmac_free_rx_buffers(priv, i);
1133 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1137 for (i = 0; i < priv->dma_rx_size; i++)
1138 stmmac_free_rx_buffers(priv, i);
1141 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1145 for (i = 0; i < priv->dma_tx_size; i++) {
1148 if (priv->extend_desc)
1149 p = &((priv->dma_etx + i)->basic);
1151 p = priv->dma_tx + i;
1153 if (priv->tx_skbuff_dma[i].buf) {
1154 if (priv->tx_skbuff_dma[i].map_as_page)
1155 dma_unmap_page(priv->device,
1156 priv->tx_skbuff_dma[i].buf,
1157 priv->hw->desc->get_tx_len(p),
1160 dma_unmap_single(priv->device,
1161 priv->tx_skbuff_dma[i].buf,
1162 priv->hw->desc->get_tx_len(p),
1166 if (priv->tx_skbuff[i] != NULL) {
1167 dev_kfree_skb_any(priv->tx_skbuff[i]);
1168 priv->tx_skbuff[i] = NULL;
1169 priv->tx_skbuff_dma[i].buf = 0;
1170 priv->tx_skbuff_dma[i].map_as_page = false;
1176 * alloc_dma_desc_resources - alloc TX/RX resources.
1177 * @priv: private structure
1178 * Description: according to which descriptor can be used (extended or basic)
1179 * this function allocates the resources for the TX and RX paths. In case of
1180 * reception, for example, it pre-allocates the RX socket buffers in order to
1181 * allow the zero-copy mechanism.
1183 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1185 unsigned int txsize = priv->dma_tx_size;
1186 unsigned int rxsize = priv->dma_rx_size;
1189 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
1191 if (!priv->rx_skbuff_dma)
1194 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
1196 if (!priv->rx_skbuff)
1199 priv->tx_skbuff_dma = kmalloc_array(txsize,
1200 sizeof(*priv->tx_skbuff_dma),
1202 if (!priv->tx_skbuff_dma)
1203 goto err_tx_skbuff_dma;
1205 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
1207 if (!priv->tx_skbuff)
1210 if (priv->extend_desc) {
1211 priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
1219 priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
1224 if (!priv->dma_etx) {
1225 dma_free_coherent(priv->device, priv->dma_rx_size *
1226 sizeof(struct dma_extended_desc),
1227 priv->dma_erx, priv->dma_rx_phy);
1231 priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
1232 sizeof(struct dma_desc),
1238 priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
1239 sizeof(struct dma_desc),
1242 if (!priv->dma_tx) {
1243 dma_free_coherent(priv->device, priv->dma_rx_size *
1244 sizeof(struct dma_desc),
1245 priv->dma_rx, priv->dma_rx_phy);
1253 kfree(priv->tx_skbuff);
1255 kfree(priv->tx_skbuff_dma);
1257 kfree(priv->rx_skbuff);
1259 kfree(priv->rx_skbuff_dma);
1263 static void free_dma_desc_resources(struct stmmac_priv *priv)
1265 /* Release the DMA TX/RX socket buffers */
1266 dma_free_rx_skbufs(priv);
1267 dma_free_tx_skbufs(priv);
1269 /* Free DMA regions of consistent memory previously allocated */
1270 if (!priv->extend_desc) {
1271 dma_free_coherent(priv->device,
1272 priv->dma_tx_size * sizeof(struct dma_desc),
1273 priv->dma_tx, priv->dma_tx_phy);
1274 dma_free_coherent(priv->device,
1275 priv->dma_rx_size * sizeof(struct dma_desc),
1276 priv->dma_rx, priv->dma_rx_phy);
1278 dma_free_coherent(priv->device, priv->dma_tx_size *
1279 sizeof(struct dma_extended_desc),
1280 priv->dma_etx, priv->dma_tx_phy);
1281 dma_free_coherent(priv->device, priv->dma_rx_size *
1282 sizeof(struct dma_extended_desc),
1283 priv->dma_erx, priv->dma_rx_phy);
1285 kfree(priv->rx_skbuff_dma);
1286 kfree(priv->rx_skbuff);
1287 kfree(priv->tx_skbuff_dma);
1288 kfree(priv->tx_skbuff);
1292 * stmmac_dma_operation_mode - HW DMA operation mode
1293 * @priv: driver private structure
1294 * Description: it is used for configuring the DMA operation mode register in
1295 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1297 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1299 int rxfifosz = priv->plat->rx_fifo_size;
1301 if (priv->plat->force_thresh_dma_mode)
1302 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
1303 else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1305 * In case of GMAC, SF mode can be enabled
1306 * to perform the TX COE in HW. This depends on:
1307 * 1) TX COE if actually supported
1308 * 2) There is no buggy Jumbo frame support
1309 * that requires the csum not to be inserted in the TDES.
1311 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
1313 priv->xstats.threshold = SF_DMA_MODE;
1315 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
1320 * stmmac_tx_clean - to manage the transmission completion
1321 * @priv: driver private structure
1322 * Description: it reclaims the transmit resources after transmission completes.
1324 static void stmmac_tx_clean(struct stmmac_priv *priv)
1326 unsigned int txsize = priv->dma_tx_size;
1327 unsigned int bytes_compl = 0, pkts_compl = 0;
1329 spin_lock(&priv->tx_lock);
1331 priv->xstats.tx_clean++;
1333 while (priv->dirty_tx != priv->cur_tx) {
1335 unsigned int entry = priv->dirty_tx % txsize;
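		/* e.g. with a 256-entry ring, a dirty_tx counter of 260 maps
		 * to ring entry 260 % 256 = 4.
		 */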
1336 struct sk_buff *skb = priv->tx_skbuff[entry];
1339 if (priv->extend_desc)
1340 p = (struct dma_desc *)(priv->dma_etx + entry);
1342 p = priv->dma_tx + entry;
1344 /* Check if the descriptor is owned by the DMA. */
1345 if (priv->hw->desc->get_tx_owner(p))
1348 /* Verify tx error by looking at the last segment. */
1349 last = priv->hw->desc->get_tx_ls(p);
1352 priv->hw->desc->tx_status(&priv->dev->stats,
1355 if (likely(tx_error == 0)) {
1356 priv->dev->stats.tx_packets++;
1357 priv->xstats.tx_pkt_n++;
1359 priv->dev->stats.tx_errors++;
1361 stmmac_get_tx_hwtstamp(priv, entry, skb);
1363 if (netif_msg_tx_done(priv))
1364 pr_debug("%s: curr %d, dirty %d\n", __func__,
1365 priv->cur_tx, priv->dirty_tx);
1367 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1368 if (priv->tx_skbuff_dma[entry].map_as_page)
1369 dma_unmap_page(priv->device,
1370 priv->tx_skbuff_dma[entry].buf,
1371 priv->hw->desc->get_tx_len(p),
1374 dma_unmap_single(priv->device,
1375 priv->tx_skbuff_dma[entry].buf,
1376 priv->hw->desc->get_tx_len(p),
1378 priv->tx_skbuff_dma[entry].buf = 0;
1379 priv->tx_skbuff_dma[entry].map_as_page = false;
1381 priv->hw->mode->clean_desc3(priv, p);
1383 if (likely(skb != NULL)) {
1385 bytes_compl += skb->len;
1386 dev_consume_skb_any(skb);
1387 priv->tx_skbuff[entry] = NULL;
1390 priv->hw->desc->release_tx_desc(p, priv->mode);
1395 netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1397 if (unlikely(netif_queue_stopped(priv->dev) &&
1398 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
1399 netif_tx_lock(priv->dev);
1400 if (netif_queue_stopped(priv->dev) &&
1401 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
1402 if (netif_msg_tx_done(priv))
1403 pr_debug("%s: restart transmit\n", __func__);
1404 netif_wake_queue(priv->dev);
1406 netif_tx_unlock(priv->dev);
1409 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1410 stmmac_enable_eee_mode(priv);
1411 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1413 spin_unlock(&priv->tx_lock);
1416 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1418 priv->hw->dma->enable_dma_irq(priv->ioaddr);
1421 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1423 priv->hw->dma->disable_dma_irq(priv->ioaddr);
1427 * stmmac_tx_err - to manage the tx error
1428 * @priv: driver private structure
1429 * Description: it cleans the descriptors and restarts the transmission
1430 * in case of transmission errors.
1432 static void stmmac_tx_err(struct stmmac_priv *priv)
1435 int txsize = priv->dma_tx_size;
1436 netif_stop_queue(priv->dev);
1438 priv->hw->dma->stop_tx(priv->ioaddr);
1439 dma_free_tx_skbufs(priv);
1440 for (i = 0; i < txsize; i++)
1441 if (priv->extend_desc)
1442 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1446 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1451 netdev_reset_queue(priv->dev);
1452 priv->hw->dma->start_tx(priv->ioaddr);
1454 priv->dev->stats.tx_errors++;
1455 netif_wake_queue(priv->dev);
1459 * stmmac_dma_interrupt - DMA ISR
1460 * @priv: driver private structure
1461 * Description: this is the DMA ISR. It is called by the main ISR.
1462 * It calls the dwmac dma routine and schedules the poll method when some work can be done.
1465 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1468 int rxfifosz = priv->plat->rx_fifo_size;
1470 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1471 if (likely((status & handle_rx)) || (status & handle_tx)) {
1472 if (likely(napi_schedule_prep(&priv->napi))) {
1473 stmmac_disable_dma_irq(priv);
1474 __napi_schedule(&priv->napi);
1477 if (unlikely(status & tx_hard_error_bump_tc)) {
1478 /* Try to bump up the dma threshold on this failure */
1479 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1482 if (priv->plat->force_thresh_dma_mode)
1483 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
1486 priv->hw->dma->dma_mode(priv->ioaddr, tc,
1487 SF_DMA_MODE, rxfifosz);
1488 priv->xstats.threshold = tc;
1490 } else if (unlikely(status == tx_hard_error))
1491 stmmac_tx_err(priv);
1495 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1496 * @priv: driver private structure
1497 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
1499 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1501 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1502 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1504 dwmac_mmc_intr_all_mask(priv->ioaddr);
1506 if (priv->dma_cap.rmon) {
1507 dwmac_mmc_ctrl(priv->ioaddr, mode);
1508 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1510 pr_info(" No MAC Management Counters available\n");
1514 * stmmac_get_synopsys_id - return the Synopsys ID (SYINID).
1515 * @priv: driver private structure
1516 * Description: this simple function decodes and returns the Synopsys ID
1517 * (SYINID) read from the HW core register.
1519 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
1521 u32 hwid = priv->hw->synopsys_uid;
1523 /* Check Synopsys Id (not available on old chips) */
1525 u32 uid = ((hwid & 0x0000ff00) >> 8);
1526 u32 synid = (hwid & 0x000000ff);
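		/* e.g. a hwid of 0x1037 decodes to user ID 0x10 and Synopsys
		 * ID 0x37, which would correspond to a 3.70 core.
		 */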
1528 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
1537 * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
1538 * @priv: driver private structure
1539 * Description: select the Enhanced/Alternate or Normal descriptors.
1540 * In case of Enhanced/Alternate, it checks if the extended descriptors are
1541 * supported by the HW capability register.
1543 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1545 if (priv->plat->enh_desc) {
1546 pr_info(" Enhanced/Alternate descriptors\n");
1548 /* GMAC older than 3.50 has no extended descriptors */
1549 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1550 pr_info("\tEnabled extended descriptors\n");
1551 priv->extend_desc = 1;
1553 pr_warn("Extended descriptors not supported\n");
1555 priv->hw->desc = &enh_desc_ops;
1557 pr_info(" Normal descriptors\n");
1558 priv->hw->desc = &ndesc_ops;
1563 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1564 * @priv: driver private structure
1566 * new GMAC chip generations have a new register to indicate the
1567 * presence of the optional features/functions.
1568 * This can also be used to override the values passed through the
1569 * platform, which are necessary for old MAC10/100 and GMAC chips.
1571 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1575 if (priv->hw->dma->get_hw_feature) {
1576 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
1578 priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
1579 priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
1580 priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
1581 priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
1582 priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
1583 priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
1584 priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
1585 priv->dma_cap.pmt_remote_wake_up =
1586 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
1587 priv->dma_cap.pmt_magic_frame =
1588 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
1590 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
1591 /* IEEE 1588-2002 */
1592 priv->dma_cap.time_stamp =
1593 (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
1594 /* IEEE 1588-2008 */
1595 priv->dma_cap.atime_stamp =
1596 (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
1597 /* 802.3az - Energy-Efficient Ethernet (EEE) */
1598 priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
1599 priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
1600 /* TX and RX csum */
1601 priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
1602 priv->dma_cap.rx_coe_type1 =
1603 (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
1604 priv->dma_cap.rx_coe_type2 =
1605 (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
1606 priv->dma_cap.rxfifo_over_2048 =
1607 (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
1608 /* TX and RX number of channels */
1609 priv->dma_cap.number_rx_channel =
1610 (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
1611 priv->dma_cap.number_tx_channel =
1612 (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
1613 /* Alternate (enhanced) DESC mode */
1614 priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
1621 * stmmac_check_ether_addr - check if the MAC addr is valid
1622 * @priv: driver private structure
1624 * it is to verify if the MAC address is valid; in case of failure it
1625 * generates a random MAC address
1627 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1629 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1630 priv->hw->mac->get_umac_addr(priv->hw,
1631 priv->dev->dev_addr, 0);
1632 if (!is_valid_ether_addr(priv->dev->dev_addr))
1633 eth_hw_addr_random(priv->dev);
1634 pr_info("%s: device MAC address %pM\n", priv->dev->name,
1635 priv->dev->dev_addr);
1640 * stmmac_init_dma_engine - DMA init.
1641 * @priv: driver private structure
1643 * It inits the DMA invoking the specific MAC/GMAC callback.
1644 * Some DMA parameters can be passed from the platform;
1645 * if they are not passed, a default is kept for the MAC or GMAC.
1647 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1649 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
1650 int mixed_burst = 0;
1653 if (priv->plat->dma_cfg) {
1654 pbl = priv->plat->dma_cfg->pbl;
1655 fixed_burst = priv->plat->dma_cfg->fixed_burst;
1656 mixed_burst = priv->plat->dma_cfg->mixed_burst;
1657 burst_len = priv->plat->dma_cfg->burst_len;
1660 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1663 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1664 burst_len, priv->dma_tx_phy,
1665 priv->dma_rx_phy, atds);
1669 * stmmac_tx_timer - mitigation sw timer for tx.
1670 * @data: data pointer
1672 * This is the timer handler to directly invoke the stmmac_tx_clean.
1674 static void stmmac_tx_timer(unsigned long data)
1676 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1678 stmmac_tx_clean(priv);
1682 * stmmac_init_tx_coalesce - init tx mitigation options.
1683 * @priv: driver private structure
1685 * This inits the transmit coalesce parameters: i.e. timer rate,
1686 * timer handler and default threshold used for enabling the
1687 * interrupt on completion bit.
1689 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1691 priv->tx_coal_frames = STMMAC_TX_FRAMES;
1692 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1693 init_timer(&priv->txtimer);
1694 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1695 priv->txtimer.data = (unsigned long)priv;
1696 priv->txtimer.function = stmmac_tx_timer;
1697 add_timer(&priv->txtimer);
1701 * stmmac_hw_setup - setup mac in a usable state.
1702 * @dev : pointer to the device structure.
1704 * this is the main function to set up the HW in a usable state: the
1705 * dma engine is reset, the core registers are configured (e.g. AXI,
1706 * Checksum features, timers). The DMA is ready to start receiving and
1709 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1712 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1714 struct stmmac_priv *priv = netdev_priv(dev);
1717 /* DMA initialization and SW reset */
1718 ret = stmmac_init_dma_engine(priv);
1720 pr_err("%s: DMA engine initialization failed\n", __func__);
1724 /* Copy the MAC addr into the HW */
1725 priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1727 /* If required, perform hw setup of the bus. */
1728 if (priv->plat->bus_setup)
1729 priv->plat->bus_setup(priv->ioaddr);
1731 /* Initialize the MAC Core */
1732 priv->hw->mac->core_init(priv->hw, dev->mtu);
1734 ret = priv->hw->mac->rx_ipc(priv->hw);
1736 pr_warn(" RX IPC Checksum Offload disabled\n");
1737 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1738 priv->hw->rx_csum = 0;
1741 /* Enable the MAC Rx/Tx */
1742 stmmac_set_mac(priv->ioaddr, true);
1744 /* Set the HW DMA mode and the COE */
1745 stmmac_dma_operation_mode(priv);
1747 stmmac_mmc_setup(priv);
1750 ret = stmmac_init_ptp(priv);
1751 if (ret && ret != -EOPNOTSUPP)
1752 pr_warn("%s: failed PTP initialisation\n", __func__);
1755 #ifdef CONFIG_DEBUG_FS
1756 ret = stmmac_init_fs(dev);
1758 pr_warn("%s: failed debugFS registration\n", __func__);
1760 /* Start the ball rolling... */
1761 pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
1762 priv->hw->dma->start_tx(priv->ioaddr);
1763 priv->hw->dma->start_rx(priv->ioaddr);
1765 /* Dump DMA/MAC registers */
1766 if (netif_msg_hw(priv)) {
1767 priv->hw->mac->dump_regs(priv->hw);
1768 priv->hw->dma->dump_regs(priv->ioaddr);
1770 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1772 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1773 priv->rx_riwt = MAX_DMA_RIWT;
1774 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1777 if (priv->pcs && priv->hw->mac->ctrl_ane)
1778 priv->hw->mac->ctrl_ane(priv->hw, 0);
1784 * stmmac_open - open entry point of the driver
1785 * @dev : pointer to the device structure.
1787 * This function is the open entry point of the driver.
1789 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1792 static int stmmac_open(struct net_device *dev)
1794 struct stmmac_priv *priv = netdev_priv(dev);
1797 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1798 priv->pcs != STMMAC_PCS_RTBI) {
1799 ret = stmmac_init_phy(dev);
1801 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1807 /* Extra statistics */
1808 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1809 priv->xstats.threshold = tc;
1811 /* Create and initialize the TX/RX descriptor chains. */
1812 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1813 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1814 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1816 ret = alloc_dma_desc_resources(priv);
1818 pr_err("%s: DMA descriptors allocation failed\n", __func__);
1819 goto dma_desc_error;
1822 ret = init_dma_desc_rings(dev, GFP_KERNEL);
1824 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1828 ret = stmmac_hw_setup(dev, true);
1830 pr_err("%s: Hw setup failed\n", __func__);
1834 stmmac_init_tx_coalesce(priv);
1837 phy_start(priv->phydev);
1839 /* Request the IRQ lines */
1840 ret = request_irq(dev->irq, stmmac_interrupt,
1841 IRQF_SHARED, dev->name, dev);
1842 if (unlikely(ret < 0)) {
1843 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1844 __func__, dev->irq, ret);
1848 /* Request the Wake IRQ in case another line is used for WoL */
1849 if (priv->wol_irq != dev->irq) {
1850 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1851 IRQF_SHARED, dev->name, dev);
1852 if (unlikely(ret < 0)) {
1853 pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1854 __func__, priv->wol_irq, ret);
1859 /* Request the LPI IRQ in case a separate line is used */
1860 if (priv->lpi_irq > 0) {
1861 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1863 if (unlikely(ret < 0)) {
1864 pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1865 __func__, priv->lpi_irq, ret);
1870 napi_enable(&priv->napi);
1871 netif_start_queue(dev);
1876 if (priv->wol_irq != dev->irq)
1877 free_irq(priv->wol_irq, dev);
1879 free_irq(dev->irq, dev);
1882 free_dma_desc_resources(priv);
1885 phy_disconnect(priv->phydev);
1891 * stmmac_release - close entry point of the driver
1892 * @dev : device pointer.
1894 * This is the stop entry point of the driver.
1896 static int stmmac_release(struct net_device *dev)
1898 struct stmmac_priv *priv = netdev_priv(dev);
1900 /* Stop and disconnect the PHY */
1902 phy_stop(priv->phydev);
1903 phy_disconnect(priv->phydev);
1904 priv->phydev = NULL;
1907 netif_stop_queue(dev);
1909 napi_disable(&priv->napi);
1911 del_timer_sync(&priv->txtimer);
1913 /* Free the IRQ lines */
1914 free_irq(dev->irq, dev);
1915 if (priv->wol_irq != dev->irq)
1916 free_irq(priv->wol_irq, dev);
1917 if (priv->lpi_irq > 0)
1918 free_irq(priv->lpi_irq, dev);
1920 if (priv->eee_enabled) {
1921 priv->tx_path_in_lpi_mode = false;
1922 del_timer_sync(&priv->eee_ctrl_timer);
1925 /* Stop TX/RX DMA and clear the descriptors */
1926 priv->hw->dma->stop_tx(priv->ioaddr);
1927 priv->hw->dma->stop_rx(priv->ioaddr);
1929 /* Release and free the Rx/Tx resources */
1930 free_dma_desc_resources(priv);
1932 /* Disable the MAC Rx/Tx */
1933 stmmac_set_mac(priv->ioaddr, false);
1935 netif_carrier_off(dev);
1937 #ifdef CONFIG_DEBUG_FS
1938 stmmac_exit_fs(dev);
1941 stmmac_release_ptp(priv);
1947 * stmmac_xmit - Tx entry point of the driver
1948 * @skb : the socket buffer
1949 * @dev : device pointer
1950 * Description : this is the tx entry point of the driver.
1951 * It programs the chain or the ring and supports oversized frames
1954 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1956 struct stmmac_priv *priv = netdev_priv(dev);
1957 unsigned int txsize = priv->dma_tx_size;
1959 int i, csum_insertion = 0, is_jumbo = 0;
1960 int nfrags = skb_shinfo(skb)->nr_frags;
1961 struct dma_desc *desc, *first;
1962 unsigned int nopaged_len = skb_headlen(skb);
1963 unsigned int enh_desc = priv->plat->enh_desc;
1965 spin_lock(&priv->tx_lock);
1967 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1968 spin_unlock(&priv->tx_lock);
1969 if (!netif_queue_stopped(dev)) {
1970 netif_stop_queue(dev);
1971 /* This is a hard error, log it. */
1972 pr_err("%s: Tx Ring full when queue awake\n", __func__);
1974 return NETDEV_TX_BUSY;
1977 if (priv->tx_path_in_lpi_mode)
1978 stmmac_disable_eee_mode(priv);
1980 entry = priv->cur_tx % txsize;
1982 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1984 if (priv->extend_desc)
1985 desc = (struct dma_desc *)(priv->dma_etx + entry);
1987 desc = priv->dma_tx + entry;
1991 /* To program the descriptors according to the size of the frame */
1993 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
1995 if (likely(!is_jumbo)) {
1996 desc->des2 = dma_map_single(priv->device, skb->data,
1997 nopaged_len, DMA_TO_DEVICE);
1998 if (dma_mapping_error(priv->device, desc->des2))
2000 priv->tx_skbuff_dma[entry].buf = desc->des2;
2001 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
2002 csum_insertion, priv->mode);
2005 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
2006 if (unlikely(entry < 0))
2010 for (i = 0; i < nfrags; i++) {
2011 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2012 int len = skb_frag_size(frag);
2014 priv->tx_skbuff[entry] = NULL;
2015 entry = (++priv->cur_tx) % txsize;
2016 if (priv->extend_desc)
2017 desc = (struct dma_desc *)(priv->dma_etx + entry);
2019 desc = priv->dma_tx + entry;
2021 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
2023 if (dma_mapping_error(priv->device, desc->des2))
2024 goto dma_map_err; /* should reuse desc w/o issues */
2026 priv->tx_skbuff_dma[entry].buf = desc->des2;
2027 priv->tx_skbuff_dma[entry].map_as_page = true;
2028 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2031 priv->hw->desc->set_tx_owner(desc);
2035 priv->tx_skbuff[entry] = skb;
2037 /* Finalize the latest segment. */
2038 priv->hw->desc->close_tx_desc(desc);
2041 /* According to the coalesce parameter the IC bit for the latest
2042 * segment could be reset and the timer re-started to invoke the
2043 * stmmac_tx function. This approach takes care of the fragments.
2045 priv->tx_count_frames += nfrags + 1;
2046 if (priv->tx_coal_frames > priv->tx_count_frames) {
2047 priv->hw->desc->clear_tx_ic(desc);
2048 priv->xstats.tx_reset_ic_bit++;
2049 mod_timer(&priv->txtimer,
2050 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2052 priv->tx_count_frames = 0;
2054 /* To avoid a race condition */
2055 priv->hw->desc->set_tx_owner(first);
2060 if (netif_msg_pktdata(priv)) {
2061 pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
2062 __func__, (priv->cur_tx % txsize),
2063 (priv->dirty_tx % txsize), entry, first, nfrags);
2065 if (priv->extend_desc)
2066 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
2068 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
2070 pr_debug(">>> frame to be transmitted: ");
2071 print_pkt(skb->data, skb->len);
2073 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2074 if (netif_msg_hw(priv))
2075 pr_debug("%s: stop transmitted packets\n", __func__);
2076 netif_stop_queue(dev);
2079 dev->stats.tx_bytes += skb->len;
2081 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2082 priv->hwts_tx_en)) {
2083 /* declare that device is doing timestamping */
2084 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2085 priv->hw->desc->enable_tx_timestamp(first);
2086 }
2088 if (!priv->hwts_tx_en)
2089 skb_tx_timestamp(skb);
2091 netdev_sent_queue(dev, skb->len);
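/* Byte Queue Limits accounting: the bytes queued above are expected to be
 * reported back with netdev_completed_queue() from the Tx clean path.
 */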
2092 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2094 spin_unlock(&priv->tx_lock);
2095 return NETDEV_TX_OK;
2097 dma_map_err:
2098 spin_unlock(&priv->tx_lock);
2099 dev_err(priv->device, "Tx dma map failed\n");
2100 dev_kfree_skb(skb);
2101 priv->dev->stats.tx_dropped++;
2102 return NETDEV_TX_OK;
2103 }
2105 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2106 {
2107 struct ethhdr *ehdr;
2108 u16 vlanid;
2110 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2111 NETIF_F_HW_VLAN_CTAG_RX &&
2112 !__vlan_get_tag(skb, &vlanid)) {
2113 /* pop the vlan tag */
2114 ehdr = (struct ethhdr *)skb->data;
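/* Move the two MAC addresses up by VLAN_HLEN bytes so that, after
 * skb_pull(), the frame starts with a plain Ethernet header and the
 * 802.1Q tag is no longer part of the packet data.
 */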
2115 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2116 skb_pull(skb, VLAN_HLEN);
2117 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2118 }
2119 }
2122 /**
2123 * stmmac_rx_refill - refill used skb preallocated buffers
2124 * @priv: driver private structure
2125 * Description : this is to reallocate the skb for the reception process
2126 * that is based on zero-copy.
2127 */
2128 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2129 {
2130 unsigned int rxsize = priv->dma_rx_size;
2131 int bfsize = priv->dma_buf_sz;
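/* dirty_rx chases cur_rx: each iteration re-arms one descriptor that
 * stmmac_rx() already consumed, allocating a fresh skb only if the
 * previous one has been passed up the stack.
 */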
2133 for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
2134 unsigned int entry = priv->dirty_rx % rxsize;
2135 struct dma_desc *p;
2137 if (priv->extend_desc)
2138 p = (struct dma_desc *)(priv->dma_erx + entry);
2139 else
2140 p = priv->dma_rx + entry;
2142 if (likely(priv->rx_skbuff[entry] == NULL)) {
2143 struct sk_buff *skb;
2145 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2147 if (unlikely(skb == NULL))
2148 break;
2150 priv->rx_skbuff[entry] = skb;
2151 priv->rx_skbuff_dma[entry] =
2152 dma_map_single(priv->device, skb->data, bfsize,
2153 DMA_FROM_DEVICE);
2154 if (dma_mapping_error(priv->device,
2155 priv->rx_skbuff_dma[entry])) {
2156 dev_err(priv->device, "Rx dma map failed\n");
2157 dev_kfree_skb(skb);
2158 break;
2159 }
2160 p->des2 = priv->rx_skbuff_dma[entry];
2162 priv->hw->mode->refill_desc3(priv, p);
2164 if (netif_msg_rx_status(priv))
2165 pr_debug("\trefill entry #%d\n", entry);
2166 }
2167 wmb();
2168 priv->hw->desc->set_rx_owner(p);
2169 wmb();
2170 }
2171 }
2173 /**
2174 * stmmac_rx - manage the receive process
2175 * @priv: driver private structure
2176 * @limit: napi budget.
2177 * Description : this is the function called by the napi poll method.
2178 * It gets all the frames inside the ring.
2179 */
2180 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2181 {
2182 unsigned int rxsize = priv->dma_rx_size;
2183 unsigned int entry = priv->cur_rx % rxsize, next_entry;
2184 unsigned int count = 0;
2185 int coe = priv->hw->rx_csum;
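/* coe caches whether the RX checksum engine is active; it selects below
 * between CHECKSUM_UNNECESSARY and letting the stack verify the checksum
 * in software.
 */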
2187 if (netif_msg_rx_status(priv)) {
2188 pr_debug("%s: descriptor ring:\n", __func__);
2189 if (priv->extend_desc)
2190 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
2191 else
2192 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
2193 }
2194 while (count < limit) {
2195 int status;
2196 struct dma_desc *p;
2200 if (priv->extend_desc)
2201 p = (struct dma_desc *)(priv->dma_erx + entry);
2202 else
2203 p = priv->dma_rx + entry;
2205 if (priv->hw->desc->get_rx_owner(p))
2206 break;
2208 count++;
2210 next_entry = (++priv->cur_rx) % rxsize;
2211 if (priv->extend_desc)
2212 prefetch(priv->dma_erx + next_entry);
2213 else
2214 prefetch(priv->dma_rx + next_entry);
2216 /* read the status of the incoming frame */
2217 status = priv->hw->desc->rx_status(&priv->dev->stats,
2218 &priv->xstats, p);
2219 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2220 priv->hw->desc->rx_extended_status(&priv->dev->stats,
2221 &priv->xstats,
2222 priv->dma_erx +
2223 entry);
2224 if (unlikely(status == discard_frame)) {
2225 priv->dev->stats.rx_errors++;
2226 if (priv->hwts_rx_en && !priv->extend_desc) {
2227 /* DESC2 & DESC3 will be overwritten by the device
2228 * with timestamp value, hence reinitialize
2229 * them in stmmac_rx_refill() function so that
2230 * device can reuse it.
2232 priv->rx_skbuff[entry] = NULL;
2233 dma_unmap_single(priv->device,
2234 priv->rx_skbuff_dma[entry],
2235 priv->dma_buf_sz,
2236 DMA_FROM_DEVICE);
2237 }
2238 } else {
2239 struct sk_buff *skb;
2240 int frame_len;
2242 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2244 /* check if frame_len fits the preallocated memory */
2245 if (frame_len > priv->dma_buf_sz) {
2246 priv->dev->stats.rx_length_errors++;
2247 break;
2248 }
2250 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2251 * Type frames (LLC/LLC-SNAP)
2252 */
2253 if (unlikely(status != llc_snap))
2254 frame_len -= ETH_FCS_LEN;
2256 if (netif_msg_rx_status(priv)) {
2257 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
2258 p, entry, p->des2);
2259 if (frame_len > ETH_FRAME_LEN)
2260 pr_debug("\tframe size %d, COE: %d\n",
2261 frame_len, status);
2262 }
2263 skb = priv->rx_skbuff[entry];
2264 if (unlikely(!skb)) {
2265 pr_err("%s: Inconsistent Rx descriptor chain\n",
2266 priv->dev->name);
2267 priv->dev->stats.rx_dropped++;
2268 break;
2269 }
2270 prefetch(skb->data - NET_IP_ALIGN);
2271 priv->rx_skbuff[entry] = NULL;
2273 stmmac_get_rx_hwtstamp(priv, entry, skb);
2275 skb_put(skb, frame_len);
2276 dma_unmap_single(priv->device,
2277 priv->rx_skbuff_dma[entry],
2278 priv->dma_buf_sz, DMA_FROM_DEVICE);
2280 if (netif_msg_pktdata(priv)) {
2281 pr_debug("frame received (%dbytes)", frame_len);
2282 print_pkt(skb->data, frame_len);
2283 }
2285 stmmac_rx_vlan(priv->dev, skb);
2287 skb->protocol = eth_type_trans(skb, priv->dev);
2289 if (unlikely(!coe))
2290 skb_checksum_none_assert(skb);
2291 else
2292 skb->ip_summed = CHECKSUM_UNNECESSARY;
2294 napi_gro_receive(&priv->napi, skb);
2296 priv->dev->stats.rx_packets++;
2297 priv->dev->stats.rx_bytes += frame_len;
2298 }
2299 entry = next_entry;
2300 }
2301 stmmac_rx_refill(priv);
2303 priv->xstats.rx_pkt_n += count;
2305 return count;
2306 }
2308 /**
2309 * stmmac_poll - stmmac poll method (NAPI)
2310 * @napi : pointer to the napi structure.
2311 * @budget : maximum number of packets that the current CPU can receive from
2312 * all interfaces.
2313 * Description :
2314 * To look at the incoming frames and clear the tx resources.
2315 */
2316 static int stmmac_poll(struct napi_struct *napi, int budget)
2317 {
2318 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2319 int work_done = 0;
2321 priv->xstats.napi_poll++;
2322 stmmac_tx_clean(priv);
2324 work_done = stmmac_rx(priv, budget);
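/* Per the NAPI contract, DMA interrupts stay masked while polling and are
 * re-enabled only once a poll consumes less than the full budget.
 */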
2325 if (work_done < budget) {
2326 napi_complete(napi);
2327 stmmac_enable_dma_irq(priv);
2328 }
2329 return work_done;
2330 }
2332 /**
2333 * stmmac_tx_timeout
2334 * @dev : Pointer to net device structure
2335 * Description: this function is called when a packet transmission fails to
2336 * complete within a reasonable time. The driver will mark the error in the
2337 * netdev structure and arrange for the device to be reset to a sane state
2338 * in order to transmit a new packet.
2339 */
2340 static void stmmac_tx_timeout(struct net_device *dev)
2341 {
2342 struct stmmac_priv *priv = netdev_priv(dev);
2344 /* Clear Tx resources and restart transmitting again */
2345 stmmac_tx_err(priv);
2346 }
2348 /**
2349 * stmmac_set_rx_mode - entry point for multicast addressing
2350 * @dev : pointer to the device structure
2352 This function is a driver entry point which gets called by the kernel
2353 whenever multicast addresses must be enabled/disabled.
2354 * Return value:
2355 * void.
2356 */
2357 static void stmmac_set_rx_mode(struct net_device *dev)
2358 {
2359 struct stmmac_priv *priv = netdev_priv(dev);
2361 priv->hw->mac->set_filter(priv->hw, dev);
2362 }
2364 /**
2365 * stmmac_change_mtu - entry point to change MTU size for the device.
2366 * @dev : device pointer.
2367 * @new_mtu : the new MTU size for the device.
2368 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
2369 * to drive packet transmission. Ethernet has an MTU of 1500 octets
2370 * (ETH_DATA_LEN). This value can be changed with ifconfig.
2371 * Return value:
2372 * 0 on success and an appropriate (-)ve integer as defined in errno.h.
2373 */
2375 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2376 {
2377 struct stmmac_priv *priv = netdev_priv(dev);
2378 int max_mtu;
2380 if (netif_running(dev)) {
2381 pr_err("%s: must be stopped to change its MTU\n", dev->name);
2382 return -EBUSY;
2383 }
2385 if (priv->plat->enh_desc)
2386 max_mtu = JUMBO_LEN;
2387 else
2388 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
2390 if (priv->plat->maxmtu < max_mtu)
2391 max_mtu = priv->plat->maxmtu;
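/* 46 is the minimum Ethernet payload: the 64-byte minimum frame size minus
 * the 14-byte header and the 4-byte FCS.
 */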
2393 if ((new_mtu < 46) || (new_mtu > max_mtu)) {
2394 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
2395 return -EINVAL;
2396 }
2398 dev->mtu = new_mtu;
2399 netdev_update_features(dev);
2401 return 0;
2402 }
2404 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2405 netdev_features_t features)
2406 {
2407 struct stmmac_priv *priv = netdev_priv(dev);
2409 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2410 features &= ~NETIF_F_RXCSUM;
2412 if (!priv->plat->tx_coe)
2413 features &= ~NETIF_F_ALL_CSUM;
2415 /* Some GMAC devices have a bugged Jumbo frame support that
2416 * needs to have the Tx COE disabled for oversized frames
2417 * (due to limited buffer sizes). In this case we disable
2418 the TX csum insertion in the TDES and not use SF.
2419 */
2420 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2421 features &= ~NETIF_F_ALL_CSUM;
2423 return features;
2424 }
2426 static int stmmac_set_features(struct net_device *netdev,
2427 netdev_features_t features)
2428 {
2429 struct stmmac_priv *priv = netdev_priv(netdev);
2431 /* Keep the COE Type if RX checksum offload is supported */
2432 if (features & NETIF_F_RXCSUM)
2433 priv->hw->rx_csum = priv->plat->rx_coe;
2434 else
2435 priv->hw->rx_csum = 0;
2436 /* No check needed because rx_coe has been set before and it will be
2437 * fixed in case of issue.
2438 */
2439 priv->hw->mac->rx_ipc(priv->hw);
2441 return 0;
2442 }
2444 /**
2445 * stmmac_interrupt - main ISR
2446 * @irq: interrupt number.
2447 * @dev_id: to pass the net device pointer.
2448 Description: this is the main driver interrupt service routine.
2449 It can call:
2450 o DMA service routine (to manage incoming frame reception and transmission
2451 status)
2452 o Core interrupts to manage: remote wake-up, management counter, LPI
2453 interrupts.
2454 */
2455 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2456 {
2457 struct net_device *dev = (struct net_device *)dev_id;
2458 struct stmmac_priv *priv = netdev_priv(dev);
2460 if (priv->irq_wake)
2461 pm_wakeup_event(priv->device, 0);
2463 if (unlikely(!dev)) {
2464 pr_err("%s: invalid dev pointer\n", __func__);
2465 return IRQ_NONE;
2466 }
2468 /* To handle GMAC own interrupts */
2469 if (priv->plat->has_gmac) {
2470 int status = priv->hw->mac->host_irq_status(priv->hw,
2471 &priv->xstats);
2472 if (unlikely(status)) {
2473 /* For LPI we need to save the tx status */
2474 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2475 priv->tx_path_in_lpi_mode = true;
2476 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2477 priv->tx_path_in_lpi_mode = false;
2478 }
2479 }
2481 /* To handle DMA interrupts */
2482 stmmac_dma_interrupt(priv);
2484 return IRQ_HANDLED;
2485 }
2487 #ifdef CONFIG_NET_POLL_CONTROLLER
2488 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2489 * to allow network I/O with interrupts disabled.
2490 */
2491 static void stmmac_poll_controller(struct net_device *dev)
2492 {
2493 disable_irq(dev->irq);
2494 stmmac_interrupt(dev->irq, dev);
2495 enable_irq(dev->irq);
2496 }
2497 #endif
2499 /**
2500 * stmmac_ioctl - Entry point for the Ioctl
2501 * @dev: Device pointer.
2502 * @rq: An IOCTL specefic structure, that can contain a pointer to
2503 * a proprietary structure used to pass information to the driver.
2504 * @cmd: IOCTL command
2506 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2507 */
2508 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2509 {
2510 struct stmmac_priv *priv = netdev_priv(dev);
2511 int ret = -EOPNOTSUPP;
2513 if (!netif_running(dev))
2514 return -EINVAL;
2516 switch (cmd) {
2517 case SIOCGMIIPHY:
2518 case SIOCGMIIREG:
2519 case SIOCSMIIREG:
2520 if (!priv->phydev)
2521 return -EINVAL;
2522 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2523 break;
2524 case SIOCSHWTSTAMP:
2525 ret = stmmac_hwtstamp_ioctl(dev, rq);
2526 break;
2527 default:
2528 break;
2529 }
2531 return ret;
2532 }
2534 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
2535 {
2536 struct stmmac_priv *priv = netdev_priv(ndev);
2537 int ret = 0;
2539 ret = eth_mac_addr(ndev, addr);
2540 if (ret)
2541 return ret;
2543 priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
2545 return ret;
2546 }
2548 #ifdef CONFIG_DEBUG_FS
2549 static struct dentry *stmmac_fs_dir;
2551 static void sysfs_display_ring(void *head, int size, int extend_desc,
2552 struct seq_file *seq)
2553 {
2554 int i;
2555 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2556 struct dma_desc *p = (struct dma_desc *)head;
2558 for (i = 0; i < size; i++) {
2559 u64 x;
2560 if (extend_desc) {
2561 x = *(u64 *) ep;
2562 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2563 i, (unsigned int)virt_to_phys(ep),
2564 (unsigned int)x, (unsigned int)(x >> 32),
2565 ep->basic.des2, ep->basic.des3);
2566 ep++;
2567 } else {
2568 x = *(u64 *) p;
2569 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2570 i, (unsigned int)virt_to_phys(p),
2571 (unsigned int)x, (unsigned int)(x >> 32),
2572 p->des2, p->des3);
2573 p++;
2574 }
2575 seq_printf(seq, "\n");
2576 }
2577 }
2579 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2580 {
2581 struct net_device *dev = seq->private;
2582 struct stmmac_priv *priv = netdev_priv(dev);
2583 unsigned int txsize = priv->dma_tx_size;
2584 unsigned int rxsize = priv->dma_rx_size;
2586 if (priv->extend_desc) {
2587 seq_printf(seq, "Extended RX descriptor ring:\n");
2588 sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
2589 seq_printf(seq, "Extended TX descriptor ring:\n");
2590 sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
2591 } else {
2592 seq_printf(seq, "RX descriptor ring:\n");
2593 sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
2594 seq_printf(seq, "TX descriptor ring:\n");
2595 sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
2596 }
2598 return 0;
2599 }
2601 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2602 {
2603 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2604 }
2606 static const struct file_operations stmmac_rings_status_fops = {
2607 .owner = THIS_MODULE,
2608 .open = stmmac_sysfs_ring_open,
2609 .read = seq_read,
2610 .llseek = seq_lseek,
2611 .release = single_release,
2612 };
2614 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2615 {
2616 struct net_device *dev = seq->private;
2617 struct stmmac_priv *priv = netdev_priv(dev);
2619 if (!priv->hw_cap_support) {
2620 seq_printf(seq, "DMA HW features not supported\n");
2621 return 0;
2622 }
2624 seq_printf(seq, "==============================\n");
2625 seq_printf(seq, "\tDMA HW features\n");
2626 seq_printf(seq, "==============================\n");
2628 seq_printf(seq, "\t10/100 Mbps %s\n",
2629 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2630 seq_printf(seq, "\t1000 Mbps %s\n",
2631 (priv->dma_cap.mbps_1000) ? "Y" : "N");
2632 seq_printf(seq, "\tHalf duple %s\n",
2633 (priv->dma_cap.half_duplex) ? "Y" : "N");
2634 seq_printf(seq, "\tHash Filter: %s\n",
2635 (priv->dma_cap.hash_filter) ? "Y" : "N");
2636 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2637 (priv->dma_cap.multi_addr) ? "Y" : "N");
2638 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
2639 (priv->dma_cap.pcs) ? "Y" : "N");
2640 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2641 (priv->dma_cap.sma_mdio) ? "Y" : "N");
2642 seq_printf(seq, "\tPMT Remote wake up: %s\n",
2643 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2644 seq_printf(seq, "\tPMT Magic Frame: %s\n",
2645 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2646 seq_printf(seq, "\tRMON module: %s\n",
2647 (priv->dma_cap.rmon) ? "Y" : "N");
2648 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2649 (priv->dma_cap.time_stamp) ? "Y" : "N");
2650 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
2651 (priv->dma_cap.atime_stamp) ? "Y" : "N");
2652 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
2653 (priv->dma_cap.eee) ? "Y" : "N");
2654 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2655 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2656 (priv->dma_cap.tx_coe) ? "Y" : "N");
2657 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2658 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2659 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2660 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
2661 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
2662 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
2663 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
2664 priv->dma_cap.number_rx_channel);
2665 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
2666 priv->dma_cap.number_tx_channel);
2667 seq_printf(seq, "\tEnhanced descriptors: %s\n",
2668 (priv->dma_cap.enh_desc) ? "Y" : "N");
2670 return 0;
2671 }
2673 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
2674 {
2675 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
2676 }
2678 static const struct file_operations stmmac_dma_cap_fops = {
2679 .owner = THIS_MODULE,
2680 .open = stmmac_sysfs_dma_cap_open,
2681 .read = seq_read,
2682 .llseek = seq_lseek,
2683 .release = single_release,
2684 };
2686 static int stmmac_init_fs(struct net_device *dev)
2687 {
2688 struct stmmac_priv *priv = netdev_priv(dev);
2690 /* Create per netdev entries */
2691 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
2693 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
2694 pr_err("ERROR %s/%s, debugfs create directory failed\n",
2695 STMMAC_RESOURCE_NAME, dev->name);
2697 return -ENOMEM;
2698 }
2700 /* Entry to report DMA RX/TX rings */
2701 priv->dbgfs_rings_status =
2702 debugfs_create_file("descriptors_status", S_IRUGO,
2703 priv->dbgfs_dir, dev,
2704 &stmmac_rings_status_fops);
2706 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
2707 pr_info("ERROR creating stmmac ring debugfs file\n");
2708 debugfs_remove_recursive(priv->dbgfs_dir);
2710 return -ENOMEM;
2711 }
2713 /* Entry to report the DMA HW features */
2714 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
2715 priv->dbgfs_dir,
2716 dev, &stmmac_dma_cap_fops);
2718 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
2719 pr_info("ERROR creating stmmac MMC debugfs file\n");
2720 debugfs_remove_recursive(priv->dbgfs_dir);
2722 return -ENOMEM;
2723 }
2725 return 0;
2726 }
2728 static void stmmac_exit_fs(struct net_device *dev)
2729 {
2730 struct stmmac_priv *priv = netdev_priv(dev);
2732 debugfs_remove_recursive(priv->dbgfs_dir);
2733 }
2734 #endif /* CONFIG_DEBUG_FS */
2736 static const struct net_device_ops stmmac_netdev_ops = {
2737 .ndo_open = stmmac_open,
2738 .ndo_start_xmit = stmmac_xmit,
2739 .ndo_stop = stmmac_release,
2740 .ndo_change_mtu = stmmac_change_mtu,
2741 .ndo_fix_features = stmmac_fix_features,
2742 .ndo_set_features = stmmac_set_features,
2743 .ndo_set_rx_mode = stmmac_set_rx_mode,
2744 .ndo_tx_timeout = stmmac_tx_timeout,
2745 .ndo_do_ioctl = stmmac_ioctl,
2746 #ifdef CONFIG_NET_POLL_CONTROLLER
2747 .ndo_poll_controller = stmmac_poll_controller,
2748 #endif
2749 .ndo_set_mac_address = stmmac_set_mac_address,
2750 };
2752 /**
2753 * stmmac_hw_init - Init the MAC device
2754 * @priv: driver private structure
2755 * Description: this function is to configure the MAC device according to
2756 * some platform parameters or the HW capability register. It prepares the
2757 * driver to use either ring or chain modes and to setup either enhanced or
2758 * normal descriptors.
2759 */
2760 static int stmmac_hw_init(struct stmmac_priv *priv)
2761 {
2762 struct mac_device_info *mac;
2764 /* Identify the MAC HW device */
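/* has_gmac selects the Synopsys GMAC (10/100/1000) callbacks; otherwise
 * the older MAC 10/100 core setup is used.
 */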
2765 if (priv->plat->has_gmac) {
2766 priv->dev->priv_flags |= IFF_UNICAST_FLT;
2767 mac = dwmac1000_setup(priv->ioaddr,
2768 priv->plat->multicast_filter_bins,
2769 priv->plat->unicast_filter_entries);
2770 } else {
2771 mac = dwmac100_setup(priv->ioaddr);
2772 }
2773 if (!mac)
2774 return -ENOMEM;
2776 priv->hw = mac;
2778 /* Get and dump the chip ID */
2779 priv->synopsys_id = stmmac_get_synopsys_id(priv);
2781 /* To use the chained or ring mode */
2782 if (chain_mode) {
2783 priv->hw->mode = &chain_mode_ops;
2784 pr_info(" Chain mode enabled\n");
2785 priv->mode = STMMAC_CHAIN_MODE;
2786 } else {
2787 priv->hw->mode = &ring_mode_ops;
2788 pr_info(" Ring mode enabled\n");
2789 priv->mode = STMMAC_RING_MODE;
2790 }
2792 /* Get the HW capability (new GMAC newer than 3.50a) */
2793 priv->hw_cap_support = stmmac_get_hw_features(priv);
2794 if (priv->hw_cap_support) {
2795 pr_info(" DMA HW capability register supported");
2797 /* We can override some gmac/dma configuration fields: e.g.
2798 * enh_desc, tx_coe (e.g. that are passed through the
2799 * platform) with the values from the HW capability
2800 * register (if supported).
2801 */
2802 priv->plat->enh_desc = priv->dma_cap.enh_desc;
2803 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
2805 /* TXCOE doesn't work in thresh DMA mode */
2806 if (priv->plat->force_thresh_dma_mode)
2807 priv->plat->tx_coe = 0;
2808 else
2809 priv->plat->tx_coe = priv->dma_cap.tx_coe;
2811 if (priv->dma_cap.rx_coe_type2)
2812 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
2813 else if (priv->dma_cap.rx_coe_type1)
2814 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
2816 } else
2817 pr_info(" No HW DMA feature register supported");
2819 /* To use alternate (extended) or normal descriptor structures */
2820 stmmac_selec_desc_mode(priv);
2822 if (priv->plat->rx_coe) {
2823 priv->hw->rx_csum = priv->plat->rx_coe;
2824 pr_info(" RX Checksum Offload Engine supported (type %d)\n",
2825 priv->plat->rx_coe);
2826 }
2827 if (priv->plat->tx_coe)
2828 pr_info(" TX Checksum insertion supported\n");
2830 if (priv->plat->pmt) {
2831 pr_info(" Wake-Up On Lan supported\n");
2832 device_set_wakeup_capable(priv->device, 1);
2833 }
2835 return 0;
2836 }
2838 /**
2839 * stmmac_dvr_probe
2840 * @device: device pointer
2841 * @plat_dat: platform data pointer
2842 * @res: stmmac resource pointer
2843 * Description: this is the main probe function used to
2844 * call the alloc_etherdev, allocate the priv structure.
2846 * returns 0 on success, otherwise errno.
2847 */
2848 int stmmac_dvr_probe(struct device *device,
2849 struct plat_stmmacenet_data *plat_dat,
2850 struct stmmac_resources *res)
2851 {
2852 int ret = 0;
2853 struct net_device *ndev = NULL;
2854 struct stmmac_priv *priv;
2856 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
2857 if (!ndev)
2858 return -ENOMEM;
2860 SET_NETDEV_DEV(ndev, device);
2862 priv = netdev_priv(ndev);
2863 priv->device = device;
2864 priv->dev = ndev;
2866 stmmac_set_ethtool_ops(ndev);
2867 priv->pause = pause;
2868 priv->plat = plat_dat;
2869 priv->ioaddr = res->addr;
2870 priv->dev->base_addr = (unsigned long)res->addr;
2872 priv->dev->irq = res->irq;
2873 priv->wol_irq = res->wol_irq;
2874 priv->lpi_irq = res->lpi_irq;
2876 if (res->mac)
2877 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
2879 dev_set_drvdata(device, priv->dev);
2881 /* Verify driver arguments */
2882 stmmac_verify_args();
2884 /* Override with kernel parameters if supplied XXX CRS XXX
2885 * this needs to have multiple instances
2886 */
2887 if ((phyaddr >= 0) && (phyaddr <= 31))
2888 priv->plat->phy_addr = phyaddr;
2890 priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
2891 if (IS_ERR(priv->stmmac_clk)) {
2892 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
2893 __func__);
2894 /* If failed to obtain stmmac_clk and specific clk_csr value
2895 * is NOT passed from the platform, probe fails.
2896 */
2897 if (!priv->plat->clk_csr) {
2898 ret = PTR_ERR(priv->stmmac_clk);
2899 goto error_clk_get;
2900 } else {
2901 priv->stmmac_clk = NULL;
2902 }
2903 }
2904 clk_prepare_enable(priv->stmmac_clk);
2906 priv->pclk = devm_clk_get(priv->device, "pclk");
2907 if (IS_ERR(priv->pclk)) {
2908 if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
2909 ret = -EPROBE_DEFER;
2910 goto error_pclk_get;
2911 }
2912 priv->pclk = NULL;
2913 }
2914 clk_prepare_enable(priv->pclk);
2916 priv->stmmac_rst = devm_reset_control_get(priv->device,
2917 STMMAC_RESOURCE_NAME);
2918 if (IS_ERR(priv->stmmac_rst)) {
2919 if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
2920 ret = -EPROBE_DEFER;
2921 goto error_hw_init;
2922 }
2923 dev_info(priv->device, "no reset control found\n");
2924 priv->stmmac_rst = NULL;
2925 }
2926 if (priv->stmmac_rst)
2927 reset_control_deassert(priv->stmmac_rst);
2929 /* Init MAC and get the capabilities */
2930 ret = stmmac_hw_init(priv);
2931 if (ret)
2932 goto error_hw_init;
2934 stmmac_check_ether_addr(priv);
2936 ndev->netdev_ops = &stmmac_netdev_ops;
2938 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2939 NETIF_F_RXCSUM;
2940 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
2941 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
2942 #ifdef STMMAC_VLAN_TAG_USED
2943 /* Both mac100 and gmac support receive VLAN tag detection */
2944 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2945 #endif
2946 priv->msg_enable = netif_msg_init(debug, default_msg_level);
2948 if (flow_ctrl)
2949 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
2951 /* Rx Watchdog is available in the COREs newer than the 3.40.
2952 * In some case, for example on bugged HW this feature
2953 has to be disabled, and this can be done by passing the
2954 riwt_off field from the platform.
2955 */
2956 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
2957 priv->use_riwt = 1;
2958 pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
2959 }
2961 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
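/* The value 64 passed above is the NAPI poll weight, i.e. the upper bound
 * handed to stmmac_poll() as its budget on each polling cycle.
 */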
2963 spin_lock_init(&priv->lock);
2964 spin_lock_init(&priv->tx_lock);
2966 /* If a specific clk_csr value is passed from the platform
2967 * this means that the CSR Clock Range selection cannot be
2968 changed at run-time and it is fixed; otherwise the driver will try to
2969 set the MDC clock dynamically according to the actual csr
2970 clock input.
2971 */
2972 if (!priv->plat->clk_csr)
2973 stmmac_clk_csr_set(priv);
2974 else
2975 priv->clk_csr = priv->plat->clk_csr;
2977 stmmac_check_pcs_mode(priv);
2979 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
2980 priv->pcs != STMMAC_PCS_RTBI) {
2981 /* MDIO bus Registration */
2982 ret = stmmac_mdio_register(ndev);
2983 if (ret < 0) {
2984 pr_debug("%s: MDIO bus (id: %d) registration failed",
2985 __func__, priv->plat->bus_id);
2986 goto error_mdio_register;
2987 }
2988 }
2990 ret = register_netdev(ndev);
2991 if (ret) {
2992 netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
2993 __func__, ret);
2994 goto error_netdev_register;
2995 }
2997 return ret;
2999 error_netdev_register:
3000 if (priv->pcs != STMMAC_PCS_RGMII &&
3001 priv->pcs != STMMAC_PCS_TBI &&
3002 priv->pcs != STMMAC_PCS_RTBI)
3003 stmmac_mdio_unregister(ndev);
3004 error_mdio_register:
3005 netif_napi_del(&priv->napi);
3006 error_hw_init:
3007 clk_disable_unprepare(priv->pclk);
3008 error_pclk_get:
3009 clk_disable_unprepare(priv->stmmac_clk);
3010 error_clk_get:
3011 free_netdev(ndev);
3013 return ret;
3014 }
3015 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
3017 /**
3018 * stmmac_dvr_remove
3019 * @ndev: net device pointer
3020 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
3021 changes the link status, releases the DMA descriptor rings.
3022 */
3023 int stmmac_dvr_remove(struct net_device *ndev)
3024 {
3025 struct stmmac_priv *priv = netdev_priv(ndev);
3027 pr_info("%s:\n\tremoving driver", __func__);
3029 priv->hw->dma->stop_rx(priv->ioaddr);
3030 priv->hw->dma->stop_tx(priv->ioaddr);
3032 stmmac_set_mac(priv->ioaddr, false);
3033 netif_carrier_off(ndev);
3034 unregister_netdev(ndev);
3035 if (priv->stmmac_rst)
3036 reset_control_assert(priv->stmmac_rst);
3037 clk_disable_unprepare(priv->pclk);
3038 clk_disable_unprepare(priv->stmmac_clk);
3039 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
3040 priv->pcs != STMMAC_PCS_RTBI)
3041 stmmac_mdio_unregister(ndev);
3042 free_netdev(ndev);
3044 return 0;
3045 }
3046 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3048 /**
3049 * stmmac_suspend - suspend callback
3050 * @ndev: net device pointer
3051 * Description: this is the function to suspend the device and it is called
3052 * by the platform driver to stop the network queue, release the resources,
3053 * program the PMT register (for WoL), clean and release driver resources.
3054 */
3055 int stmmac_suspend(struct net_device *ndev)
3056 {
3057 struct stmmac_priv *priv = netdev_priv(ndev);
3058 unsigned long flags;
3060 if (!ndev || !netif_running(ndev))
3061 return 0;
3063 if (priv->phydev)
3064 phy_stop(priv->phydev);
3066 spin_lock_irqsave(&priv->lock, flags);
3068 netif_device_detach(ndev);
3069 netif_stop_queue(ndev);
3071 napi_disable(&priv->napi);
3073 if (priv->eee_enabled) {
3074 priv->tx_path_in_lpi_mode = false;
3075 del_timer_sync(&priv->eee_ctrl_timer);
3076 }
3078 /* Stop TX/RX DMA */
3079 priv->hw->dma->stop_tx(priv->ioaddr);
3080 priv->hw->dma->stop_rx(priv->ioaddr);
3082 /* Enable Power down mode by programming the PMT regs */
3083 if (device_may_wakeup(priv->device)) {
3084 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3085 priv->irq_wake = 1;
3086 } else {
3087 stmmac_set_mac(priv->ioaddr, false);
3088 pinctrl_pm_select_sleep_state(priv->device);
3089 /* Disable clock in case of PWM is off */
3090 clk_disable(priv->pclk);
3091 clk_disable(priv->stmmac_clk);
3092 }
3093 spin_unlock_irqrestore(&priv->lock, flags);
3095 priv->oldlink = 0;
3096 priv->speed = 0;
3097 priv->oldduplex = -1;
3098 return 0;
3099 }
3100 EXPORT_SYMBOL_GPL(stmmac_suspend);
3102 /**
3103 * stmmac_resume - resume callback
3104 * @ndev: net device pointer
3105 * Description: when resume this function is invoked to setup the DMA and CORE
3106 * in a usable state.
3107 */
3108 int stmmac_resume(struct net_device *ndev)
3109 {
3110 struct stmmac_priv *priv = netdev_priv(ndev);
3111 unsigned long flags;
3113 if (!netif_running(ndev))
3114 return 0;
3116 spin_lock_irqsave(&priv->lock, flags);
3118 /* Power Down bit, into the PM register, is cleared
3119 * automatically as soon as a magic packet or a Wake-up frame
3120 * is received. Anyway, it's better to manually clear
3121 * this bit because it can generate problems while resuming
3122 from other devices (e.g. a serial console).
3123 */
3124 if (device_may_wakeup(priv->device)) {
3125 priv->hw->mac->pmt(priv->hw, 0);
3126 priv->irq_wake = 0;
3127 } else {
3128 pinctrl_pm_select_default_state(priv->device);
3129 /* enable the clk previously disabled */
3130 clk_enable(priv->stmmac_clk);
3131 clk_enable(priv->pclk);
3132 /* reset the phy so that it's ready */
3133 if (priv->mii)
3134 stmmac_mdio_reset(priv->mii);
3135 }
3137 netif_device_attach(ndev);
3139 priv->cur_rx = 0;
3140 priv->dirty_rx = 0;
3141 priv->dirty_tx = 0;
3142 priv->cur_tx = 0;
3143 stmmac_clear_descriptors(priv);
3145 stmmac_hw_setup(ndev, false);
3146 stmmac_init_tx_coalesce(priv);
3147 stmmac_set_rx_mode(ndev);
3149 napi_enable(&priv->napi);
3151 netif_start_queue(ndev);
3153 spin_unlock_irqrestore(&priv->lock, flags);
3155 if (priv->phydev)
3156 phy_start(priv->phydev);
3158 return 0;
3159 }
3160 EXPORT_SYMBOL_GPL(stmmac_resume);
3162 #ifndef MODULE
3163 static int __init stmmac_cmdline_opt(char *str)
3164 {
3165 char *opt;
3167 if (!str || !*str)
3168 return -EINVAL;
3169 while ((opt = strsep(&str, ",")) != NULL) {
3170 if (!strncmp(opt, "debug:", 6)) {
3171 if (kstrtoint(opt + 6, 0, &debug))
3172 goto err;
3173 } else if (!strncmp(opt, "phyaddr:", 8)) {
3174 if (kstrtoint(opt + 8, 0, &phyaddr))
3175 goto err;
3176 } else if (!strncmp(opt, "dma_txsize:", 11)) {
3177 if (kstrtoint(opt + 11, 0, &dma_txsize))
3178 goto err;
3179 } else if (!strncmp(opt, "dma_rxsize:", 11)) {
3180 if (kstrtoint(opt + 11, 0, &dma_rxsize))
3181 goto err;
3182 } else if (!strncmp(opt, "buf_sz:", 7)) {
3183 if (kstrtoint(opt + 7, 0, &buf_sz))
3184 goto err;
3185 } else if (!strncmp(opt, "tc:", 3)) {
3186 if (kstrtoint(opt + 3, 0, &tc))
3187 goto err;
3188 } else if (!strncmp(opt, "watchdog:", 9)) {
3189 if (kstrtoint(opt + 9, 0, &watchdog))
3190 goto err;
3191 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3192 if (kstrtoint(opt + 10, 0, &flow_ctrl))
3193 goto err;
3194 } else if (!strncmp(opt, "pause:", 6)) {
3195 if (kstrtoint(opt + 6, 0, &pause))
3196 goto err;
3197 } else if (!strncmp(opt, "eee_timer:", 10)) {
3198 if (kstrtoint(opt + 10, 0, &eee_timer))
3199 goto err;
3200 } else if (!strncmp(opt, "chain_mode:", 11)) {
3201 if (kstrtoint(opt + 11, 0, &chain_mode))
3202 goto err;
3203 }
3204 }
3205 return 0;
3207 err:
3208 pr_err("%s: ERROR broken module parameter conversion", __func__);
3209 return -EINVAL;
3210 }
3212 __setup("stmmaceth=", stmmac_cmdline_opt);
3213 #endif /* MODULE */
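/* Example boot-time usage (values are illustrative only):
 *   stmmaceth=debug:16,watchdog:4000,buf_sz:2048,tc:256
 * Each option simply overrides the corresponding module parameter above.
 */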
3215 static int __init stmmac_init(void)
3216 {
3217 #ifdef CONFIG_DEBUG_FS
3218 /* Create debugfs main directory if it doesn't exist yet */
3219 if (!stmmac_fs_dir) {
3220 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3222 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3223 pr_err("ERROR %s, debugfs create directory failed\n",
3224 STMMAC_RESOURCE_NAME);
3226 return -ENOMEM;
3227 }
3228 }
3229 #endif
3231 return 0;
3232 }
3234 static void __exit stmmac_exit(void)
3235 {
3236 #ifdef CONFIG_DEBUG_FS
3237 debugfs_remove_recursive(stmmac_fs_dir);
3238 #endif
3239 }
3241 module_init(stmmac_init)
3242 module_exit(stmmac_exit)
3244 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3245 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3246 MODULE_LICENSE("GPL");