GNU Linux-libre 4.14.265-gnu1: drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
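/* Align first to the cache line size, then to a 16-byte boundary; the
 * second ALIGN only matters when SMP_CACHE_BYTES is smaller than 16.
 */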
54 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
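/* In the RX path, frames shorter than this threshold are typically copied
 * into a freshly allocated skb so that the preallocated DMA buffer can be
 * reused immediately.
 */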
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver uses ring mode to manage tx and rx descriptors,
105  * but the user can force chain mode instead of ring mode.
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148         u32 queue;
149
150         for (queue = 0; queue < rx_queues_cnt; queue++) {
151                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152
153                 napi_disable(&rx_q->napi);
154         }
155 }
156
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164         u32 queue;
165
166         for (queue = 0; queue < rx_queues_cnt; queue++) {
167                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168
169                 napi_enable(&rx_q->napi);
170         }
171 }
172
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180         u32 queue;
181
182         for (queue = 0; queue < tx_queues_cnt; queue++)
183                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193         u32 queue;
194
195         for (queue = 0; queue < tx_queues_cnt; queue++)
196                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *      If a specific clk_csr value is passed from the platform
206  *      this means that the CSR Clock Range selection cannot be
207  *      changed at run-time and it is fixed (as reported in the driver
208  *      documentation). Otherwise the driver will try to set the MDC
209  *      clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213         u32 clk_rate;
214
215         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216
217         /* The platform-provided default clk_csr is assumed valid for all
218          * cases except the ones handled below.
219          * For values higher than the IEEE 802.3 specified frequency range
220          * we cannot estimate the proper divider, since the frequency of
221          * clk_csr_i is not known. So we do not change the default
222          * divider.
223          */
224         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225                 if (clk_rate < CSR_F_35M)
226                         priv->clk_csr = STMMAC_CSR_20_35M;
227                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228                         priv->clk_csr = STMMAC_CSR_35_60M;
229                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230                         priv->clk_csr = STMMAC_CSR_60_100M;
231                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232                         priv->clk_csr = STMMAC_CSR_100_150M;
233                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234                         priv->clk_csr = STMMAC_CSR_150_250M;
235                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
236                         priv->clk_csr = STMMAC_CSR_250_300M;
237         }
238
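        /* dwmac-sun8i uses its own MDC divider encoding: the values 0-3
         * written below select progressively larger dividers for faster
         * module clocks (the exact ratios are defined by the sun8i glue
         * layer, not shown here).
         */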
239         if (priv->plat->has_sun8i) {
240                 if (clk_rate > 160000000)
241                         priv->clk_csr = 0x03;
242                 else if (clk_rate > 80000000)
243                         priv->clk_csr = 0x02;
244                 else if (clk_rate > 40000000)
245                         priv->clk_csr = 0x01;
246                 else
247                         priv->clk_csr = 0;
248         }
249 }
250
251 static void print_pkt(unsigned char *buf, int len)
252 {
253         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256
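/* stmmac_tx_avail() returns the number of free descriptors in the circular
 * TX ring; one slot is always kept unused so that cur_tx == dirty_tx
 * unambiguously means "ring empty". For example, with a 512-entry ring,
 * cur_tx = 510 and dirty_tx = 5 give 512 - 510 + 5 - 1 = 6 free entries.
 */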
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260         u32 avail;
261
262         if (tx_q->dirty_tx > tx_q->cur_tx)
263                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264         else
265                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266
267         return avail;
268 }
269
270 /**
271  * stmmac_rx_dirty - Get RX queue dirty
272  * @priv: driver private structure
273  * @queue: RX queue index
274  */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278         u32 dirty;
279
280         if (rx_q->dirty_rx <= rx_q->cur_rx)
281                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
282         else
283                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284
285         return dirty;
286 }
287
288 /**
289  * stmmac_hw_fix_mac_speed - callback for speed selection
290  * @priv: driver private structure
291  * Description: on some platforms (e.g. ST), some HW system configuration
292  * registers have to be set according to the link speed negotiated.
293  */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296         struct net_device *ndev = priv->dev;
297         struct phy_device *phydev = ndev->phydev;
298
299         if (likely(priv->plat->fix_mac_speed))
300                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302
303 /**
304  * stmmac_enable_eee_mode - check and enter in LPI mode
305  * @priv: driver private structure
306  * Description: this function checks that no TX work is pending and, if so,
307  * enters LPI mode when EEE is in use.
308  */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311         u32 tx_cnt = priv->plat->tx_queues_to_use;
312         u32 queue;
313
314         /* check if all TX queues have the work finished */
315         for (queue = 0; queue < tx_cnt; queue++) {
316                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317
318                 if (tx_q->dirty_tx != tx_q->cur_tx)
319                         return; /* still unfinished work */
320         }
321
322         /* Check and enter in LPI mode */
323         if (!priv->tx_path_in_lpi_mode)
324                 priv->hw->mac->set_eee_mode(priv->hw,
325                                             priv->plat->en_tx_lpi_clockgating);
326 }
327
328 /**
329  * stmmac_disable_eee_mode - disable and exit from LPI mode
330  * @priv: driver private structure
331  * Description: this function exits the LPI state and disables EEE.
332  * It is called from the xmit path.
333  */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336         priv->hw->mac->reset_eee_mode(priv->hw);
337         del_timer_sync(&priv->eee_ctrl_timer);
338         priv->tx_path_in_lpi_mode = false;
339 }
340
341 /**
342  * stmmac_eee_ctrl_timer - EEE TX SW timer.
343  * @arg : data hook
344  * Description:
345  *  if there is no data transfer and if we are not in LPI state,
346  *  then the MAC transmitter can be moved to the LPI state.
347  */
348 static void stmmac_eee_ctrl_timer(unsigned long arg)
349 {
350         struct stmmac_priv *priv = (struct stmmac_priv *)arg;
351
352         stmmac_enable_eee_mode(priv);
353         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355
356 /**
357  * stmmac_eee_init - init EEE
358  * @priv: driver private structure
359  * Description:
360  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
361  *  can also manage EEE, this function enables the LPI state and starts the
362  *  related timer.
363  */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366         struct net_device *ndev = priv->dev;
367         int interface = priv->plat->interface;
368         bool ret = false;
369
370         if ((interface != PHY_INTERFACE_MODE_MII) &&
371             (interface != PHY_INTERFACE_MODE_GMII) &&
372             !phy_interface_mode_is_rgmii(interface))
373                 goto out;
374
375         /* When using the PCS we cannot access the PHY registers at this stage,
376          * so we do not support extra features such as EEE.
377          */
378         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
379             (priv->hw->pcs == STMMAC_PCS_TBI) ||
380             (priv->hw->pcs == STMMAC_PCS_RTBI))
381                 goto out;
382
383         /* MAC core supports the EEE feature. */
384         if (priv->dma_cap.eee) {
385                 int tx_lpi_timer = priv->tx_lpi_timer;
386
387                 /* Check if the PHY supports EEE */
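                /* phy_init_eee() returns 0 only when the PHY and the link
                 * partner both support EEE; its second argument also allows
                 * the PHY to stop the xMII clock while in LPI.
                 */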
388                 if (phy_init_eee(ndev->phydev, 1)) {
389                         /* Handle, at run time, the case where EEE can no longer
390                          * be supported (for example because the link partner
391                          * capabilities have changed).
392                          * In that case the driver disables its own timers.
393                          */
394                         mutex_lock(&priv->lock);
395                         if (priv->eee_active) {
396                                 netdev_dbg(priv->dev, "disable EEE\n");
397                                 del_timer_sync(&priv->eee_ctrl_timer);
398                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
399                                                              tx_lpi_timer);
400                         }
401                         priv->eee_active = 0;
402                         mutex_unlock(&priv->lock);
403                         goto out;
404                 }
405                 /* Activate the EEE and start timers */
406                 mutex_lock(&priv->lock);
407                 if (!priv->eee_active) {
408                         priv->eee_active = 1;
409                         setup_timer(&priv->eee_ctrl_timer,
410                                     stmmac_eee_ctrl_timer,
411                                     (unsigned long)priv);
412                         mod_timer(&priv->eee_ctrl_timer,
413                                   STMMAC_LPI_T(eee_timer));
414
415                         priv->hw->mac->set_eee_timer(priv->hw,
416                                                      STMMAC_DEFAULT_LIT_LS,
417                                                      tx_lpi_timer);
418                 }
419                 /* Set HW EEE according to the speed */
420                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
421
422                 ret = true;
423                 mutex_unlock(&priv->lock);
424
425                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
426         }
427 out:
428         return ret;
429 }
430
431 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
432  * @priv: driver private structure
433  * @p : descriptor pointer
434  * @skb : the socket buffer
435  * Description:
436  * This function reads the timestamp from the descriptor, performs some
437  * sanity checks, and passes it to the stack.
438  */
439 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
440                                    struct dma_desc *p, struct sk_buff *skb)
441 {
442         struct skb_shared_hwtstamps shhwtstamp;
443         u64 ns;
444
445         if (!priv->hwts_tx_en)
446                 return;
447
448         /* exit if skb doesn't support hw tstamp */
449         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
450                 return;
451
452         /* check tx tstamp status */
453         if (priv->hw->desc->get_tx_timestamp_status(p)) {
454                 /* get the valid tstamp */
455                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
456
457                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
458                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
459
460                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
461                 /* pass tstamp to stack */
462                 skb_tstamp_tx(skb, &shhwtstamp);
463         }
464
465         return;
466 }
467
468 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
469  * @priv: driver private structure
470  * @p : descriptor pointer
471  * @np : next descriptor pointer
472  * @skb : the socket buffer
473  * Description:
474  * This function reads the received packet's timestamp from the descriptor
475  * and passes it to the stack. It also performs some sanity checks.
476  */
477 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
478                                    struct dma_desc *np, struct sk_buff *skb)
479 {
480         struct skb_shared_hwtstamps *shhwtstamp = NULL;
481         struct dma_desc *desc = p;
482         u64 ns;
483
484         if (!priv->hwts_rx_en)
485                 return;
486         /* For GMAC4, the valid timestamp is from CTX next desc. */
487         if (priv->plat->has_gmac4)
488                 desc = np;
489
490         /* Check if timestamp is available */
491         if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
492                 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
493                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
494                 shhwtstamp = skb_hwtstamps(skb);
495                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
496                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
497         } else  {
498                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
499         }
500 }
501
502 /**
503  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
504  *  @dev: device pointer.
505  *  @ifr: An IOCTL-specific structure that can contain a pointer to
506  *  a proprietary structure used to pass information to the driver.
507  *  Description:
508  *  This function configures the MAC to enable/disable both outgoing (TX)
509  *  and incoming (RX) packet timestamping based on user input.
510  *  Return Value:
511  *  0 on success and an appropriate -ve integer on failure.
512  */
513 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
514 {
515         struct stmmac_priv *priv = netdev_priv(dev);
516         struct hwtstamp_config config;
517         struct timespec64 now;
518         u64 temp = 0;
519         u32 ptp_v2 = 0;
520         u32 tstamp_all = 0;
521         u32 ptp_over_ipv4_udp = 0;
522         u32 ptp_over_ipv6_udp = 0;
523         u32 ptp_over_ethernet = 0;
524         u32 snap_type_sel = 0;
525         u32 ts_master_en = 0;
526         u32 ts_event_en = 0;
527         u32 value = 0;
528         u32 sec_inc;
529
530         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
531                 netdev_alert(priv->dev, "No support for HW time stamping\n");
532                 priv->hwts_tx_en = 0;
533                 priv->hwts_rx_en = 0;
534
535                 return -EOPNOTSUPP;
536         }
537
538         if (copy_from_user(&config, ifr->ifr_data,
539                            sizeof(struct hwtstamp_config)))
540                 return -EFAULT;
541
542         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
543                    __func__, config.flags, config.tx_type, config.rx_filter);
544
545         /* reserved for future extensions */
546         if (config.flags)
547                 return -EINVAL;
548
549         if (config.tx_type != HWTSTAMP_TX_OFF &&
550             config.tx_type != HWTSTAMP_TX_ON)
551                 return -ERANGE;
552
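        /* adv_ts is set when the core provides the advanced (IEEE 1588-2008,
         * PTPv2) timestamping unit; without it only the basic PTPv1 event
         * filter in the else branch below is available.
         */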
553         if (priv->adv_ts) {
554                 switch (config.rx_filter) {
555                 case HWTSTAMP_FILTER_NONE:
556                         /* do not time stamp any incoming packet */
557                         config.rx_filter = HWTSTAMP_FILTER_NONE;
558                         break;
559
560                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
561                         /* PTP v1, UDP, any kind of event packet */
562                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
563                         /* take time stamp for all event messages */
564                         if (priv->plat->has_gmac4)
565                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
566                         else
567                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
568
569                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
571                         break;
572
573                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
574                         /* PTP v1, UDP, Sync packet */
575                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576                         /* take time stamp for SYNC messages only */
577                         ts_event_en = PTP_TCR_TSEVNTENA;
578
579                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
581                         break;
582
583                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
584                         /* PTP v1, UDP, Delay_req packet */
585                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586                         /* take time stamp for Delay_Req messages only */
587                         ts_master_en = PTP_TCR_TSMSTRENA;
588                         ts_event_en = PTP_TCR_TSEVNTENA;
589
590                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592                         break;
593
594                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
595                         /* PTP v2, UDP, any kind of event packet */
596                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597                         ptp_v2 = PTP_TCR_TSVER2ENA;
598                         /* take time stamp for all event messages */
599                         if (priv->plat->has_gmac4)
600                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
601                         else
602                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
603
604                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606                         break;
607
608                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
609                         /* PTP v2, UDP, Sync packet */
610                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
611                         ptp_v2 = PTP_TCR_TSVER2ENA;
612                         /* take time stamp for SYNC messages only */
613                         ts_event_en = PTP_TCR_TSEVNTENA;
614
615                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
616                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
617                         break;
618
619                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
620                         /* PTP v2, UDP, Delay_req packet */
621                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
622                         ptp_v2 = PTP_TCR_TSVER2ENA;
623                         /* take time stamp for Delay_Req messages only */
624                         ts_master_en = PTP_TCR_TSMSTRENA;
625                         ts_event_en = PTP_TCR_TSEVNTENA;
626
627                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
628                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
629                         break;
630
631                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
632                         /* PTP v2/802.1AS, any layer, any kind of event packet */
633                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
634                         ptp_v2 = PTP_TCR_TSVER2ENA;
635                         /* take time stamp for all event messages */
636                         if (priv->plat->has_gmac4)
637                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
638                         else
639                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
640
641                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643                         ptp_over_ethernet = PTP_TCR_TSIPENA;
644                         break;
645
646                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
647                         /* PTP v2/802.1AS, any layer, Sync packet */
648                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
649                         ptp_v2 = PTP_TCR_TSVER2ENA;
650                         /* take time stamp for SYNC messages only */
651                         ts_event_en = PTP_TCR_TSEVNTENA;
652
653                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
654                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
655                         ptp_over_ethernet = PTP_TCR_TSIPENA;
656                         break;
657
658                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
659                         /* PTP v2/802.1AS, any layer, Delay_req packet */
660                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
661                         ptp_v2 = PTP_TCR_TSVER2ENA;
662                         /* take time stamp for Delay_Req messages only */
663                         ts_master_en = PTP_TCR_TSMSTRENA;
664                         ts_event_en = PTP_TCR_TSEVNTENA;
665
666                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668                         ptp_over_ethernet = PTP_TCR_TSIPENA;
669                         break;
670
671                 case HWTSTAMP_FILTER_NTP_ALL:
672                 case HWTSTAMP_FILTER_ALL:
673                         /* time stamp any incoming packet */
674                         config.rx_filter = HWTSTAMP_FILTER_ALL;
675                         tstamp_all = PTP_TCR_TSENALL;
676                         break;
677
678                 default:
679                         return -ERANGE;
680                 }
681         } else {
682                 switch (config.rx_filter) {
683                 case HWTSTAMP_FILTER_NONE:
684                         config.rx_filter = HWTSTAMP_FILTER_NONE;
685                         break;
686                 default:
687                         /* PTP v1, UDP, any kind of event packet */
688                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
689                         break;
690                 }
691         }
692         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
693         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
694
695         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
696                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
697         else {
698                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
699                          tstamp_all | ptp_v2 | ptp_over_ethernet |
700                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
701                          ts_master_en | snap_type_sel);
702                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
703
704                 /* program Sub Second Increment reg */
705                 sec_inc = priv->hw->ptp->config_sub_second_increment(
706                         priv->ptpaddr, priv->plat->clk_ptp_rate,
707                         priv->plat->has_gmac4);
708                 temp = div_u64(1000000000ULL, sec_inc);
709
710                 /* calculate the default addend value:
711                  * the formula is:
712                  * addend = (2^32) / freq_div_ratio;
713                  * where freq_div_ratio = 1e9 ns / sec_inc
714                  */
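                /* Hypothetical example: with sec_inc = 20 ns and a 100 MHz
                 * clk_ptp_rate, freq_div_ratio = 1e9 / 20 = 50,000,000 and
                 * addend = 2^32 * 50e6 / 100e6 = 0x80000000.
                 */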
715                 temp = (u64)(temp << 32);
716                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
717                 priv->hw->ptp->config_addend(priv->ptpaddr,
718                                              priv->default_addend);
719
720                 /* initialize system time */
721                 ktime_get_real_ts64(&now);
722
723                 /* lower 32 bits of tv_sec are safe until y2106 */
724                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
725                                             now.tv_nsec);
726         }
727
728         return copy_to_user(ifr->ifr_data, &config,
729                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
730 }
731
732 /**
733  * stmmac_init_ptp - init PTP
734  * @priv: driver private structure
735  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
736  * This is done by looking at the HW cap. register.
737  * This function also registers the ptp driver.
738  */
739 static int stmmac_init_ptp(struct stmmac_priv *priv)
740 {
741         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
742                 return -EOPNOTSUPP;
743
744         priv->adv_ts = 0;
745         /* Check if adv_ts can be enabled for dwmac 4.x core */
746         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
747                 priv->adv_ts = 1;
748         /* Dwmac 3.x core with extend_desc can support adv_ts */
749         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
750                 priv->adv_ts = 1;
751
752         if (priv->dma_cap.time_stamp)
753                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
754
755         if (priv->adv_ts)
756                 netdev_info(priv->dev,
757                             "IEEE 1588-2008 Advanced Timestamp supported\n");
758
759         priv->hw->ptp = &stmmac_ptp;
760         priv->hwts_tx_en = 0;
761         priv->hwts_rx_en = 0;
762
763         stmmac_ptp_register(priv);
764
765         return 0;
766 }
767
768 static void stmmac_release_ptp(struct stmmac_priv *priv)
769 {
770         if (priv->plat->clk_ptp_ref)
771                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
772         stmmac_ptp_unregister(priv);
773 }
774
775 /**
776  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
777  *  @priv: driver private structure
778  *  Description: It is used for configuring the flow control in all queues
779  */
780 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
781 {
782         u32 tx_cnt = priv->plat->tx_queues_to_use;
783
784         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
785                                  priv->pause, tx_cnt);
786 }
787
788 /**
789  * stmmac_adjust_link - adjusts the link parameters
790  * @dev: net device structure
791  * Description: this is the helper called by the physical abstraction layer
792  * drivers to communicate the PHY link status. According to the negotiated
793  * speed and duplex, this driver can invoke registered glue-logic as well.
794  * It also invokes the EEE initialization, since the link may be brought up
795  * on a different network (that is EEE capable).
796  */
797 static void stmmac_adjust_link(struct net_device *dev)
798 {
799         struct stmmac_priv *priv = netdev_priv(dev);
800         struct phy_device *phydev = dev->phydev;
801         bool new_state = false;
802
803         if (!phydev)
804                 return;
805
806         mutex_lock(&priv->lock);
807
808         if (phydev->link) {
809                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
810
811                 /* Now we make sure that we can be in full duplex mode.
812                  * If not, we operate in half-duplex mode. */
813                 if (phydev->duplex != priv->oldduplex) {
814                         new_state = true;
815                         if (!phydev->duplex)
816                                 ctrl &= ~priv->hw->link.duplex;
817                         else
818                                 ctrl |= priv->hw->link.duplex;
819                         priv->oldduplex = phydev->duplex;
820                 }
821                 /* Flow Control operation */
822                 if (phydev->pause)
823                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
824
825                 if (phydev->speed != priv->speed) {
826                         new_state = true;
827                         ctrl &= ~priv->hw->link.speed_mask;
828                         switch (phydev->speed) {
829                         case SPEED_1000:
830                                 ctrl |= priv->hw->link.speed1000;
831                                 break;
832                         case SPEED_100:
833                                 ctrl |= priv->hw->link.speed100;
834                                 break;
835                         case SPEED_10:
836                                 ctrl |= priv->hw->link.speed10;
837                                 break;
838                         default:
839                                 netif_warn(priv, link, priv->dev,
840                                            "broken speed: %d\n", phydev->speed);
841                                 phydev->speed = SPEED_UNKNOWN;
842                                 break;
843                         }
844                         if (phydev->speed != SPEED_UNKNOWN)
845                                 stmmac_hw_fix_mac_speed(priv);
846                         priv->speed = phydev->speed;
847                 }
848
849                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
850
851                 if (!priv->oldlink) {
852                         new_state = true;
853                         priv->oldlink = true;
854                 }
855         } else if (priv->oldlink) {
856                 new_state = true;
857                 priv->oldlink = false;
858                 priv->speed = SPEED_UNKNOWN;
859                 priv->oldduplex = DUPLEX_UNKNOWN;
860         }
861
862         if (new_state && netif_msg_link(priv))
863                 phy_print_status(phydev);
864
865         mutex_unlock(&priv->lock);
866
867         if (phydev->is_pseudo_fixed_link)
868                 /* Stop the PHY layer from calling the adjust_link hook when
869                  * a switch is attached to the stmmac driver.
870                  */
871                 phydev->irq = PHY_IGNORE_INTERRUPT;
872         else
873                 /* At this stage, init the EEE if supported.
874                  * Never called in case of fixed_link.
875                  */
876                 priv->eee_enabled = stmmac_eee_init(priv);
877 }
878
879 /**
880  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
881  * @priv: driver private structure
882  * Description: this verifies whether the HW supports the Physical Coding
883  * Sublayer (PCS), an interface that can be used when the MAC is
884  * configured for the TBI, RTBI, or SGMII PHY interface.
885  */
886 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
887 {
888         int interface = priv->plat->interface;
889
890         if (priv->dma_cap.pcs) {
891                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
892                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
893                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
894                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
895                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
896                         priv->hw->pcs = STMMAC_PCS_RGMII;
897                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
898                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
899                         priv->hw->pcs = STMMAC_PCS_SGMII;
900                 }
901         }
902 }
903
904 /**
905  * stmmac_init_phy - PHY initialization
906  * @dev: net device structure
907  * Description: it initializes the driver's PHY state, and attaches the PHY
908  * to the mac driver.
909  *  Return value:
910  *  0 on success
911  */
912 static int stmmac_init_phy(struct net_device *dev)
913 {
914         struct stmmac_priv *priv = netdev_priv(dev);
915         u32 tx_cnt = priv->plat->tx_queues_to_use;
916         struct phy_device *phydev;
917         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
918         char bus_id[MII_BUS_ID_SIZE];
919         int interface = priv->plat->interface;
920         int max_speed = priv->plat->max_speed;
921         priv->oldlink = false;
922         priv->speed = SPEED_UNKNOWN;
923         priv->oldduplex = DUPLEX_UNKNOWN;
924
925         if (priv->plat->phy_node) {
926                 phydev = of_phy_connect(dev, priv->plat->phy_node,
927                                         &stmmac_adjust_link, 0, interface);
928         } else {
929                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
930                          priv->plat->bus_id);
931
932                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
933                          priv->plat->phy_addr);
934                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
935                            phy_id_fmt);
936
937                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
938                                      interface);
939         }
940
941         if (IS_ERR_OR_NULL(phydev)) {
942                 netdev_err(priv->dev, "Could not attach to PHY\n");
943                 if (!phydev)
944                         return -ENODEV;
945
946                 return PTR_ERR(phydev);
947         }
948
949         /* Stop Advertising 1000BASE Capability if interface is not GMII */
950         if ((interface == PHY_INTERFACE_MODE_MII) ||
951             (interface == PHY_INTERFACE_MODE_RMII) ||
952                 (max_speed < 1000 && max_speed > 0))
953                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
954                                          SUPPORTED_1000baseT_Full);
955
956         /*
957          * Half-duplex mode is not supported with multiqueue;
958          * half-duplex can only work with a single queue.
959          */
960         if (tx_cnt > 1)
961                 phydev->supported &= ~(SUPPORTED_1000baseT_Half |
962                                        SUPPORTED_100baseT_Half |
963                                        SUPPORTED_10baseT_Half);
964
965         /*
966          * Broken HW is sometimes missing the pull-up resistor on the
967          * MDIO line, which results in reads to non-existent devices returning
968          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
969          * device as well.
970          * Note: phydev->phy_id is the result of reading the UID PHY registers.
971          */
972         if (!priv->plat->phy_node && phydev->phy_id == 0) {
973                 phy_disconnect(phydev);
974                 return -ENODEV;
975         }
976
977         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
978          * subsequent PHY polling, make sure we force a link transition if
979          * we have a UP/DOWN/UP transition
980          */
981         if (phydev->is_pseudo_fixed_link)
982                 phydev->irq = PHY_POLL;
983
984         phy_attached_info(phydev);
985         return 0;
986 }
987
988 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
989 {
990         u32 rx_cnt = priv->plat->rx_queues_to_use;
991         void *head_rx;
992         u32 queue;
993
994         /* Display RX rings */
995         for (queue = 0; queue < rx_cnt; queue++) {
996                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
997
998                 pr_info("\tRX Queue %u rings\n", queue);
999
1000                 if (priv->extend_desc)
1001                         head_rx = (void *)rx_q->dma_erx;
1002                 else
1003                         head_rx = (void *)rx_q->dma_rx;
1004
1005                 /* Display RX ring */
1006                 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
1007         }
1008 }
1009
1010 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1011 {
1012         u32 tx_cnt = priv->plat->tx_queues_to_use;
1013         void *head_tx;
1014         u32 queue;
1015
1016         /* Display TX rings */
1017         for (queue = 0; queue < tx_cnt; queue++) {
1018                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1019
1020                 pr_info("\tTX Queue %u rings\n", queue);
1021
1022                 if (priv->extend_desc)
1023                         head_tx = (void *)tx_q->dma_etx;
1024                 else
1025                         head_tx = (void *)tx_q->dma_tx;
1026
1027                 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1028         }
1029 }
1030
1031 static void stmmac_display_rings(struct stmmac_priv *priv)
1032 {
1033         /* Display RX ring */
1034         stmmac_display_rx_rings(priv);
1035
1036         /* Display TX ring */
1037         stmmac_display_tx_rings(priv);
1038 }
1039
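/* Pick the smallest supported DMA buffer size (2K/4K/8K/16K, or the
 * 1536-byte default) that can hold an MTU-sized frame.
 */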
1040 static int stmmac_set_bfsize(int mtu, int bufsize)
1041 {
1042         int ret = bufsize;
1043
1044         if (mtu >= BUF_SIZE_8KiB)
1045                 ret = BUF_SIZE_16KiB;
1046         else if (mtu >= BUF_SIZE_4KiB)
1047                 ret = BUF_SIZE_8KiB;
1048         else if (mtu >= BUF_SIZE_2KiB)
1049                 ret = BUF_SIZE_4KiB;
1050         else if (mtu > DEFAULT_BUFSIZE)
1051                 ret = BUF_SIZE_2KiB;
1052         else
1053                 ret = DEFAULT_BUFSIZE;
1054
1055         return ret;
1056 }
1057
1058 /**
1059  * stmmac_clear_rx_descriptors - clear RX descriptors
1060  * @priv: driver private structure
1061  * @queue: RX queue index
1062  * Description: this function is called to clear the RX descriptors,
1063  * whether basic or extended descriptors are in use.
1064  */
1065 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1066 {
1067         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1068         int i;
1069
1070         /* Clear the RX descriptors */
1071         for (i = 0; i < DMA_RX_SIZE; i++)
1072                 if (priv->extend_desc)
1073                         priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1074                                                      priv->use_riwt, priv->mode,
1075                                                      (i == DMA_RX_SIZE - 1),
1076                                                      priv->dma_buf_sz);
1077                 else
1078                         priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1079                                                      priv->use_riwt, priv->mode,
1080                                                      (i == DMA_RX_SIZE - 1),
1081                                                      priv->dma_buf_sz);
1082 }
1083
1084 /**
1085  * stmmac_clear_tx_descriptors - clear tx descriptors
1086  * @priv: driver private structure
1087  * @queue: TX queue index.
1088  * Description: this function is called to clear the TX descriptors,
1089  * whether basic or extended descriptors are in use.
1090  */
1091 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1092 {
1093         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1094         int i;
1095
1096         /* Clear the TX descriptors */
1097         for (i = 0; i < DMA_TX_SIZE; i++)
1098                 if (priv->extend_desc)
1099                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1100                                                      priv->mode,
1101                                                      (i == DMA_TX_SIZE - 1));
1102                 else
1103                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1104                                                      priv->mode,
1105                                                      (i == DMA_TX_SIZE - 1));
1106 }
1107
1108 /**
1109  * stmmac_clear_descriptors - clear descriptors
1110  * @priv: driver private structure
1111  * Description: this function is called to clear the TX and RX descriptors,
1112  * whether basic or extended descriptors are in use.
1113  */
1114 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1115 {
1116         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1117         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1118         u32 queue;
1119
1120         /* Clear the RX descriptors */
1121         for (queue = 0; queue < rx_queue_cnt; queue++)
1122                 stmmac_clear_rx_descriptors(priv, queue);
1123
1124         /* Clear the TX descriptors */
1125         for (queue = 0; queue < tx_queue_cnt; queue++)
1126                 stmmac_clear_tx_descriptors(priv, queue);
1127 }
1128
1129 /**
1130  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1131  * @priv: driver private structure
1132  * @p: descriptor pointer
1133  * @i: descriptor index
1134  * @flags: gfp flag
1135  * @queue: RX queue index
1136  * Description: this function is called to allocate a receive buffer, perform
1137  * the DMA mapping and init the descriptor.
1138  */
1139 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1140                                   int i, gfp_t flags, u32 queue)
1141 {
1142         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1143         struct sk_buff *skb;
1144
1145         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1146         if (!skb) {
1147                 netdev_err(priv->dev,
1148                            "%s: Rx init fails; skb is NULL\n", __func__);
1149                 return -ENOMEM;
1150         }
1151         rx_q->rx_skbuff[i] = skb;
1152         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1153                                                 priv->dma_buf_sz,
1154                                                 DMA_FROM_DEVICE);
1155         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1156                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1157                 dev_kfree_skb_any(skb);
1158                 return -EINVAL;
1159         }
1160
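        /* DWMAC4 and newer descriptors take the buffer address in des0;
         * older cores use des2.
         */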
1161         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1162                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1163         else
1164                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1165
1166         if ((priv->hw->mode->init_desc3) &&
1167             (priv->dma_buf_sz == BUF_SIZE_16KiB))
1168                 priv->hw->mode->init_desc3(p);
1169
1170         return 0;
1171 }
1172
1173 /**
1174  * stmmac_free_rx_buffer - free an RX dma buffer
1175  * @priv: private structure
1176  * @queue: RX queue index
1177  * @i: buffer index.
1178  */
1179 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1180 {
1181         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1182
1183         if (rx_q->rx_skbuff[i]) {
1184                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1185                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1186                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1187         }
1188         rx_q->rx_skbuff[i] = NULL;
1189 }
1190
1191 /**
1192  * stmmac_free_tx_buffer - free a TX dma buffer
1193  * @priv: private structure
1194  * @queue: TX queue index
1195  * @i: buffer index.
1196  */
1197 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1198 {
1199         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1200
1201         if (tx_q->tx_skbuff_dma[i].buf) {
1202                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1203                         dma_unmap_page(priv->device,
1204                                        tx_q->tx_skbuff_dma[i].buf,
1205                                        tx_q->tx_skbuff_dma[i].len,
1206                                        DMA_TO_DEVICE);
1207                 else
1208                         dma_unmap_single(priv->device,
1209                                          tx_q->tx_skbuff_dma[i].buf,
1210                                          tx_q->tx_skbuff_dma[i].len,
1211                                          DMA_TO_DEVICE);
1212         }
1213
1214         if (tx_q->tx_skbuff[i]) {
1215                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1216                 tx_q->tx_skbuff[i] = NULL;
1217                 tx_q->tx_skbuff_dma[i].buf = 0;
1218                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1219         }
1220 }
1221
1222 /**
1223  * init_dma_rx_desc_rings - init the RX descriptor rings
1224  * @dev: net device structure
1225  * @flags: gfp flag.
1226  * Description: this function initializes the DMA RX descriptors
1227  * and allocates the socket buffers. It supports the chained and ring
1228  * modes.
1229  */
1230 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1231 {
1232         struct stmmac_priv *priv = netdev_priv(dev);
1233         u32 rx_count = priv->plat->rx_queues_to_use;
1234         unsigned int bfsize = 0;
1235         int ret = -ENOMEM;
1236         int queue;
1237         int i;
1238
1239         if (priv->hw->mode->set_16kib_bfsize)
1240                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1241
1242         if (bfsize < BUF_SIZE_16KiB)
1243                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1244
1245         priv->dma_buf_sz = bfsize;
1246
1247         /* RX INITIALIZATION */
1248         netif_dbg(priv, probe, priv->dev,
1249                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1250
1251         for (queue = 0; queue < rx_count; queue++) {
1252                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1253
1254                 netif_dbg(priv, probe, priv->dev,
1255                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1256                           (u32)rx_q->dma_rx_phy);
1257
1258                 for (i = 0; i < DMA_RX_SIZE; i++) {
1259                         struct dma_desc *p;
1260
1261                         if (priv->extend_desc)
1262                                 p = &((rx_q->dma_erx + i)->basic);
1263                         else
1264                                 p = rx_q->dma_rx + i;
1265
1266                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1267                                                      queue);
1268                         if (ret)
1269                                 goto err_init_rx_buffers;
1270
1271                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1272                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1273                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1274                 }
1275
1276                 rx_q->cur_rx = 0;
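                /* The loop above ran to completion, so i == DMA_RX_SIZE and
                 * the (unsigned) subtraction below evaluates to 0: the ring
                 * starts full, with no dirty entries to refill.
                 */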
1277                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1278
1279                 stmmac_clear_rx_descriptors(priv, queue);
1280
1281                 /* Setup the chained descriptor addresses */
1282                 if (priv->mode == STMMAC_CHAIN_MODE) {
1283                         if (priv->extend_desc)
1284                                 priv->hw->mode->init(rx_q->dma_erx,
1285                                                      rx_q->dma_rx_phy,
1286                                                      DMA_RX_SIZE, 1);
1287                         else
1288                                 priv->hw->mode->init(rx_q->dma_rx,
1289                                                      rx_q->dma_rx_phy,
1290                                                      DMA_RX_SIZE, 0);
1291                 }
1292         }
1293
1294         buf_sz = bfsize;
1295
1296         return 0;
1297
1298 err_init_rx_buffers:
1299         while (queue >= 0) {
1300                 while (--i >= 0)
1301                         stmmac_free_rx_buffer(priv, queue, i);
1302
1303                 if (queue == 0)
1304                         break;
1305
1306                 i = DMA_RX_SIZE;
1307                 queue--;
1308         }
1309
1310         return ret;
1311 }
1312
1313 /**
1314  * init_dma_tx_desc_rings - init the TX descriptor rings
1315  * @dev: net device structure.
1316  * Description: this function initializes the DMA TX descriptors
1317  * and allocates the socket buffers. It supports the chained and ring
1318  * modes.
1319  */
1320 static int init_dma_tx_desc_rings(struct net_device *dev)
1321 {
1322         struct stmmac_priv *priv = netdev_priv(dev);
1323         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1324         u32 queue;
1325         int i;
1326
1327         for (queue = 0; queue < tx_queue_cnt; queue++) {
1328                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1329
1330                 netif_dbg(priv, probe, priv->dev,
1331                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1332                          (u32)tx_q->dma_tx_phy);
1333
1334                 /* Setup the chained descriptor addresses */
1335                 if (priv->mode == STMMAC_CHAIN_MODE) {
1336                         if (priv->extend_desc)
1337                                 priv->hw->mode->init(tx_q->dma_etx,
1338                                                      tx_q->dma_tx_phy,
1339                                                      DMA_TX_SIZE, 1);
1340                         else
1341                                 priv->hw->mode->init(tx_q->dma_tx,
1342                                                      tx_q->dma_tx_phy,
1343                                                      DMA_TX_SIZE, 0);
1344                 }
1345
1346                 for (i = 0; i < DMA_TX_SIZE; i++) {
1347                         struct dma_desc *p;
1348                         if (priv->extend_desc)
1349                                 p = &((tx_q->dma_etx + i)->basic);
1350                         else
1351                                 p = tx_q->dma_tx + i;
1352
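                        /* DWMAC4 and newer descriptors are cleared in full
                         * (des0-des3); on older cores only the buffer pointer
                         * in des2 needs to be reset here.
                         */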
1353                         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1354                                 p->des0 = 0;
1355                                 p->des1 = 0;
1356                                 p->des2 = 0;
1357                                 p->des3 = 0;
1358                         } else {
1359                                 p->des2 = 0;
1360                         }
1361
1362                         tx_q->tx_skbuff_dma[i].buf = 0;
1363                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1364                         tx_q->tx_skbuff_dma[i].len = 0;
1365                         tx_q->tx_skbuff_dma[i].last_segment = false;
1366                         tx_q->tx_skbuff[i] = NULL;
1367                 }
1368
1369                 tx_q->dirty_tx = 0;
1370                 tx_q->cur_tx = 0;
1371
1372                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1373         }
1374
1375         return 0;
1376 }
1377
1378 /**
1379  * init_dma_desc_rings - init the RX/TX descriptor rings
1380  * @dev: net device structure
1381  * @flags: gfp flag.
1382  * Description: this function initializes the DMA RX/TX descriptors
1383  * and allocates the socket buffers. It supports the chained and ring
1384  * modes.
1385  */
1386 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1387 {
1388         struct stmmac_priv *priv = netdev_priv(dev);
1389         int ret;
1390
1391         ret = init_dma_rx_desc_rings(dev, flags);
1392         if (ret)
1393                 return ret;
1394
1395         ret = init_dma_tx_desc_rings(dev);
1396
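             /* Re-initialize every RX/TX descriptor to its default state
              * before the rings are handed over to the DMA.
              */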
1397         stmmac_clear_descriptors(priv);
1398
1399         if (netif_msg_hw(priv))
1400                 stmmac_display_rings(priv);
1401
1402         return ret;
1403 }
1404
1405 /**
1406  * dma_free_rx_skbufs - free RX dma buffers
1407  * @priv: private structure
1408  * @queue: RX queue index
1409  */
1410 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1411 {
1412         int i;
1413
1414         for (i = 0; i < DMA_RX_SIZE; i++)
1415                 stmmac_free_rx_buffer(priv, queue, i);
1416 }
1417
1418 /**
1419  * dma_free_tx_skbufs - free TX dma buffers
1420  * @priv: private structure
1421  * @queue: TX queue index
1422  */
1423 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1424 {
1425         int i;
1426
1427         for (i = 0; i < DMA_TX_SIZE; i++)
1428                 stmmac_free_tx_buffer(priv, queue, i);
1429 }
1430
1431 /**
1432  * stmmac_free_tx_skbufs - free TX skb buffers
1433  * @priv: private structure
1434  */
1435 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1436 {
1437         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1438         u32 queue;
1439
1440         for (queue = 0; queue < tx_queue_cnt; queue++)
1441                 dma_free_tx_skbufs(priv, queue);
1442 }
1443
1444 /**
1445  * free_dma_rx_desc_resources - free RX dma desc resources
1446  * @priv: private structure
1447  */
1448 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1449 {
1450         u32 rx_count = priv->plat->rx_queues_to_use;
1451         u32 queue;
1452
1453         /* Free RX queue resources */
1454         for (queue = 0; queue < rx_count; queue++) {
1455                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1456
1457                 /* Release the DMA RX socket buffers */
1458                 dma_free_rx_skbufs(priv, queue);
1459
1460                 /* Free DMA regions of consistent memory previously allocated */
1461                 if (!priv->extend_desc)
1462                         dma_free_coherent(priv->device,
1463                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1464                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1465                 else
1466                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1467                                           sizeof(struct dma_extended_desc),
1468                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1469
1470                 kfree(rx_q->rx_skbuff_dma);
1471                 kfree(rx_q->rx_skbuff);
1472         }
1473 }
1474
1475 /**
1476  * free_dma_tx_desc_resources - free TX dma desc resources
1477  * @priv: private structure
1478  */
1479 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1480 {
1481         u32 tx_count = priv->plat->tx_queues_to_use;
1482         u32 queue;
1483
1484         /* Free TX queue resources */
1485         for (queue = 0; queue < tx_count; queue++) {
1486                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1487
1488                 /* Release the DMA TX socket buffers */
1489                 dma_free_tx_skbufs(priv, queue);
1490
1491                 /* Free DMA regions of consistent memory previously allocated */
1492                 if (!priv->extend_desc)
1493                         dma_free_coherent(priv->device,
1494                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1495                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1496                 else
1497                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1498                                           sizeof(struct dma_extended_desc),
1499                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1500
1501                 kfree(tx_q->tx_skbuff_dma);
1502                 kfree(tx_q->tx_skbuff);
1503         }
1504 }
1505
1506 /**
1507  * alloc_dma_rx_desc_resources - alloc RX resources.
1508  * @priv: private structure
1509  * Description: according to which descriptor can be used (extended or basic)
1510  * this function allocates the resources for the RX path. It also
1511  * pre-allocates the RX socket buffers in order to allow the zero-copy
1512  * mechanism.
1513  */
1514 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1515 {
1516         u32 rx_count = priv->plat->rx_queues_to_use;
1517         int ret = -ENOMEM;
1518         u32 queue;
1519
1520         /* RX queues buffers and DMA */
1521         for (queue = 0; queue < rx_count; queue++) {
1522                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1523
1524                 rx_q->queue_index = queue;
1525                 rx_q->priv_data = priv;
1526
1527                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1528                                                     sizeof(dma_addr_t),
1529                                                     GFP_KERNEL);
1530                 if (!rx_q->rx_skbuff_dma)
1531                         goto err_dma;
1532
1533                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1534                                                 sizeof(struct sk_buff *),
1535                                                 GFP_KERNEL);
1536                 if (!rx_q->rx_skbuff)
1537                         goto err_dma;
1538
1539                 if (priv->extend_desc) {
1540                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1541                                                             DMA_RX_SIZE *
1542                                                             sizeof(struct
1543                                                             dma_extended_desc),
1544                                                             &rx_q->dma_rx_phy,
1545                                                             GFP_KERNEL);
1546                         if (!rx_q->dma_erx)
1547                                 goto err_dma;
1548
1549                 } else {
1550                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1551                                                            DMA_RX_SIZE *
1552                                                            sizeof(struct
1553                                                            dma_desc),
1554                                                            &rx_q->dma_rx_phy,
1555                                                            GFP_KERNEL);
1556                         if (!rx_q->dma_rx)
1557                                 goto err_dma;
1558                 }
1559         }
1560
1561         return 0;
1562
1563 err_dma:
1564         free_dma_rx_desc_resources(priv);
1565
1566         return ret;
1567 }
1568
1569 /**
1570  * alloc_dma_tx_desc_resources - alloc TX resources.
1571  * @priv: private structure
1572  * Description: according to which descriptor can be used (extended or basic)
1573  * this function allocates the resources for the TX path: the descriptor
1574  * rings and the bookkeeping used to track the socket buffers queued
1575  * for transmission.
1576  */
1577 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1578 {
1579         u32 tx_count = priv->plat->tx_queues_to_use;
1580         int ret = -ENOMEM;
1581         u32 queue;
1582
1583         /* TX queues buffers and DMA */
1584         for (queue = 0; queue < tx_count; queue++) {
1585                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1586
1587                 tx_q->queue_index = queue;
1588                 tx_q->priv_data = priv;
1589
1590                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1591                                                     sizeof(*tx_q->tx_skbuff_dma),
1592                                                     GFP_KERNEL);
1593                 if (!tx_q->tx_skbuff_dma)
1594                         goto err_dma;
1595
1596                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1597                                                 sizeof(struct sk_buff *),
1598                                                 GFP_KERNEL);
1599                 if (!tx_q->tx_skbuff)
1600                         goto err_dma;
1601
1602                 if (priv->extend_desc) {
1603                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1604                                                             DMA_TX_SIZE *
1605                                                             sizeof(struct
1606                                                             dma_extended_desc),
1607                                                             &tx_q->dma_tx_phy,
1608                                                             GFP_KERNEL);
1609                         if (!tx_q->dma_etx)
1610                                 goto err_dma;
1611                 } else {
1612                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1613                                                            DMA_TX_SIZE *
1614                                                            sizeof(struct
1615                                                                   dma_desc),
1616                                                            &tx_q->dma_tx_phy,
1617                                                            GFP_KERNEL);
1618                         if (!tx_q->dma_tx)
1619                                 goto err_dma;
1620                 }
1621         }
1622
1623         return 0;
1624
1625 err_dma:
1626         free_dma_tx_desc_resources(priv);
1627
1628         return ret;
1629 }
1630
1631 /**
1632  * alloc_dma_desc_resources - alloc TX/RX resources.
1633  * @priv: private structure
1634  * Description: according to which descriptor can be used (extended or basic)
1635  * this function allocates the resources for the TX and RX paths. In case of
1636  * reception, for example, it pre-allocates the RX socket buffers in order to
1637  * allow the zero-copy mechanism.
1638  */
1639 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1640 {
1641         /* RX Allocation */
1642         int ret = alloc_dma_rx_desc_resources(priv);
1643
1644         if (ret)
1645                 return ret;
1646
1647         ret = alloc_dma_tx_desc_resources(priv);
1648
1649         return ret;
1650 }
1651
1652 /**
1653  * free_dma_desc_resources - free dma desc resources
1654  * @priv: private structure
1655  */
1656 static void free_dma_desc_resources(struct stmmac_priv *priv)
1657 {
1658         /* Release the DMA RX socket buffers */
1659         free_dma_rx_desc_resources(priv);
1660
1661         /* Release the DMA TX socket buffers */
1662         free_dma_tx_desc_resources(priv);
1663 }
1664
1665 /**
1666  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1667  *  @priv: driver private structure
1668  *  Description: It is used for enabling the rx queues in the MAC
1669  */
1670 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1671 {
1672         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1673         int queue;
1674         u8 mode;
1675
1676         for (queue = 0; queue < rx_queues_count; queue++) {
1677                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1678                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1679         }
1680 }
1681
1682 /**
1683  * stmmac_start_rx_dma - start RX DMA channel
1684  * @priv: driver private structure
1685  * @chan: RX channel index
1686  * Description:
1687  * This starts an RX DMA channel
1688  */
1689 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1690 {
1691         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1692         priv->hw->dma->start_rx(priv->ioaddr, chan);
1693 }
1694
1695 /**
1696  * stmmac_start_tx_dma - start TX DMA channel
1697  * @priv: driver private structure
1698  * @chan: TX channel index
1699  * Description:
1700  * This starts a TX DMA channel
1701  */
1702 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1703 {
1704         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1705         priv->hw->dma->start_tx(priv->ioaddr, chan);
1706 }
1707
1708 /**
1709  * stmmac_stop_rx_dma - stop RX DMA channel
1710  * @priv: driver private structure
1711  * @chan: RX channel index
1712  * Description:
1713  * This stops an RX DMA channel
1714  */
1715 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1716 {
1717         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1718         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1719 }
1720
1721 /**
1722  * stmmac_stop_tx_dma - stop TX DMA channel
1723  * @priv: driver private structure
1724  * @chan: TX channel index
1725  * Description:
1726  * This stops a TX DMA channel
1727  */
1728 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1729 {
1730         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1731         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1732 }
1733
1734 /**
1735  * stmmac_start_all_dma - start all RX and TX DMA channels
1736  * @priv: driver private structure
1737  * Description:
1738  * This starts all the RX and TX DMA channels
1739  */
1740 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1741 {
1742         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1743         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1744         u32 chan = 0;
1745
1746         for (chan = 0; chan < rx_channels_count; chan++)
1747                 stmmac_start_rx_dma(priv, chan);
1748
1749         for (chan = 0; chan < tx_channels_count; chan++)
1750                 stmmac_start_tx_dma(priv, chan);
1751 }
1752
1753 /**
1754  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1755  * @priv: driver private structure
1756  * Description:
1757  * This stops the RX and TX DMA channels
1758  */
1759 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1760 {
1761         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1762         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1763         u32 chan = 0;
1764
1765         for (chan = 0; chan < rx_channels_count; chan++)
1766                 stmmac_stop_rx_dma(priv, chan);
1767
1768         for (chan = 0; chan < tx_channels_count; chan++)
1769                 stmmac_stop_tx_dma(priv, chan);
1770 }
1771
1772 /**
1773  *  stmmac_dma_operation_mode - HW DMA operation mode
1774  *  @priv: driver private structure
1775  *  Description: it is used for configuring the DMA operation mode register in
1776  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1777  */
1778 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1779 {
1780         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1781         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1782         int rxfifosz = priv->plat->rx_fifo_size;
1783         int txfifosz = priv->plat->tx_fifo_size;
1784         u32 txmode = 0;
1785         u32 rxmode = 0;
1786         u32 chan = 0;
1787
1788         if (rxfifosz == 0)
1789                 rxfifosz = priv->dma_cap.rx_fifo_size;
1790         if (txfifosz == 0)
1791                 txfifosz = priv->dma_cap.tx_fifo_size;
1792
1793         /* Adjust for real per queue fifo size */
1794         rxfifosz /= rx_channels_count;
1795         txfifosz /= tx_channels_count;
1796
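             /* Select between threshold (cut-through) mode and
              * Store-And-Forward mode for the TX and RX paths.
              */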
1797         if (priv->plat->force_thresh_dma_mode) {
1798                 txmode = tc;
1799                 rxmode = tc;
1800         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1801                 /*
1802                  * In case of GMAC, SF mode can be enabled
1803                  * to perform the TX COE in HW. This depends on:
1804                  * 1) TX COE being actually supported;
1805                  * 2) there being no buggy Jumbo frame support
1806                  *    that requires not inserting the csum in the TDES.
1807                  */
1808                 txmode = SF_DMA_MODE;
1809                 rxmode = SF_DMA_MODE;
1810                 priv->xstats.threshold = SF_DMA_MODE;
1811         } else {
1812                 txmode = tc;
1813                 rxmode = SF_DMA_MODE;
1814         }
1815
1816         /* configure all channels */
1817         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1818                 for (chan = 0; chan < rx_channels_count; chan++)
1819                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1820                                                    rxfifosz);
1821
1822                 for (chan = 0; chan < tx_channels_count; chan++)
1823                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1824                                                    txfifosz);
1825         } else {
1826                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1827                                         rxfifosz);
1828         }
1829 }
1830
1831 /**
1832  * stmmac_tx_clean - to manage the transmission completion
1833  * @priv: driver private structure
1834  * @queue: TX queue index
1835  * Description: it reclaims the transmit resources after transmission completes.
1836  */
1837 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1838 {
1839         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1840         unsigned int bytes_compl = 0, pkts_compl = 0;
1841         unsigned int entry;
1842
1843         netif_tx_lock(priv->dev);
1844
1845         priv->xstats.tx_clean++;
1846
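             /* Walk the ring from dirty_tx to cur_tx and release every
              * descriptor the DMA has finished with.
              */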
1847         entry = tx_q->dirty_tx;
1848         while (entry != tx_q->cur_tx) {
1849                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1850                 struct dma_desc *p;
1851                 int status;
1852
1853                 if (priv->extend_desc)
1854                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1855                 else
1856                         p = tx_q->dma_tx + entry;
1857
1858                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1859                                                       &priv->xstats, p,
1860                                                       priv->ioaddr);
1861                 /* Check if the descriptor is owned by the DMA */
1862                 if (unlikely(status & tx_dma_own))
1863                         break;
1864
1865                 /* Make sure descriptor fields are read after reading
1866                  * the own bit.
1867                  */
1868                 dma_rmb();
1869
1870                 /* Just consider the last segment and ...*/
1871                 if (likely(!(status & tx_not_ls))) {
1872                         /* ... verify the status error condition */
1873                         if (unlikely(status & tx_err)) {
1874                                 priv->dev->stats.tx_errors++;
1875                         } else {
1876                                 priv->dev->stats.tx_packets++;
1877                                 priv->xstats.tx_pkt_n++;
1878                         }
1879                         stmmac_get_tx_hwtstamp(priv, p, skb);
1880                 }
1881
1882                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1883                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1884                                 dma_unmap_page(priv->device,
1885                                                tx_q->tx_skbuff_dma[entry].buf,
1886                                                tx_q->tx_skbuff_dma[entry].len,
1887                                                DMA_TO_DEVICE);
1888                         else
1889                                 dma_unmap_single(priv->device,
1890                                                  tx_q->tx_skbuff_dma[entry].buf,
1891                                                  tx_q->tx_skbuff_dma[entry].len,
1892                                                  DMA_TO_DEVICE);
1893                         tx_q->tx_skbuff_dma[entry].buf = 0;
1894                         tx_q->tx_skbuff_dma[entry].len = 0;
1895                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1896                 }
1897
1898                 if (priv->hw->mode->clean_desc3)
1899                         priv->hw->mode->clean_desc3(tx_q, p);
1900
1901                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1902                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1903
1904                 if (likely(skb != NULL)) {
1905                         pkts_compl++;
1906                         bytes_compl += skb->len;
1907                         dev_consume_skb_any(skb);
1908                         tx_q->tx_skbuff[entry] = NULL;
1909                 }
1910
1911                 priv->hw->desc->release_tx_desc(p, priv->mode);
1912
1913                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1914         }
1915         tx_q->dirty_tx = entry;
1916
1917         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1918                                   pkts_compl, bytes_compl);
1919
1920         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1921                                                                 queue))) &&
1922             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1923
1924                 netif_dbg(priv, tx_done, priv->dev,
1925                           "%s: restart transmit\n", __func__);
1926                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1927         }
1928
1929         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1930                 stmmac_enable_eee_mode(priv);
1931                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1932         }
1933         netif_tx_unlock(priv->dev);
1934 }
1935
1936 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1937 {
1938         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1939 }
1940
1941 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1942 {
1943         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1944 }
1945
1946 /**
1947  * stmmac_tx_err - to manage the tx error
1948  * @priv: driver private structure
1949  * @chan: channel index
1950  * Description: it cleans the descriptors and restarts the transmission
1951  * in case of transmission errors.
1952  */
1953 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1954 {
1955         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1956         int i;
1957
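             /* Stop the queue and its DMA channel, drop the pending buffers,
              * re-initialize the descriptor ring and restart transmission.
              */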
1958         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1959
1960         stmmac_stop_tx_dma(priv, chan);
1961         dma_free_tx_skbufs(priv, chan);
1962         for (i = 0; i < DMA_TX_SIZE; i++)
1963                 if (priv->extend_desc)
1964                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1965                                                      priv->mode,
1966                                                      (i == DMA_TX_SIZE - 1));
1967                 else
1968                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1969                                                      priv->mode,
1970                                                      (i == DMA_TX_SIZE - 1));
1971         tx_q->dirty_tx = 0;
1972         tx_q->cur_tx = 0;
1973         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1974         stmmac_start_tx_dma(priv, chan);
1975
1976         priv->dev->stats.tx_errors++;
1977         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1978 }
1979
1980 /**
1981  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1982  *  @priv: driver private structure
1983  *  @txmode: TX operating mode
1984  *  @rxmode: RX operating mode
1985  *  @chan: channel index
1986  *  Description: it is used for configuring the DMA operation mode at
1987  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1988  *  mode.
1989  */
1990 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1991                                           u32 rxmode, u32 chan)
1992 {
1993         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1994         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1995         int rxfifosz = priv->plat->rx_fifo_size;
1996         int txfifosz = priv->plat->tx_fifo_size;
1997
1998         if (rxfifosz == 0)
1999                 rxfifosz = priv->dma_cap.rx_fifo_size;
2000         if (txfifosz == 0)
2001                 txfifosz = priv->dma_cap.tx_fifo_size;
2002
2003         /* Adjust for real per queue fifo size */
2004         rxfifosz /= rx_channels_count;
2005         txfifosz /= tx_channels_count;
2006
2007         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2008                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
2009                                            rxfifosz);
2010                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
2011                                            txfifosz);
2012         } else {
2013                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
2014                                         rxfifosz);
2015         }
2016 }
2017
2018 /**
2019  * stmmac_dma_interrupt - DMA ISR
2020  * @priv: driver private structure
2021  * Description: this is the DMA ISR. It is called by the main ISR.
2022  * It calls the dwmac dma routine and schedules the poll method when there
2023  * is work that can be done.
2024  */
2025 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2026 {
2027         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2028         int status;
2029         u32 chan;
2030
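             /* Check the status of each channel: schedule NAPI when RX/TX
              * work is pending, try a higher DMA threshold on
              * tx_hard_error_bump_tc, or restart the channel on a hard
              * TX error.
              */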
2031         for (chan = 0; chan < tx_channel_count; chan++) {
2032                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2033
2034                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
2035                                                       &priv->xstats, chan);
2036                 if (likely((status & handle_rx)) || (status & handle_tx)) {
2037                         if (likely(napi_schedule_prep(&rx_q->napi))) {
2038                                 stmmac_disable_dma_irq(priv, chan);
2039                                 __napi_schedule(&rx_q->napi);
2040                         }
2041                 }
2042
2043                 if (unlikely(status & tx_hard_error_bump_tc)) {
2044                         /* Try to bump up the dma threshold on this failure */
2045                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2046                             (tc <= 256)) {
2047                                 tc += 64;
2048                                 if (priv->plat->force_thresh_dma_mode)
2049                                         stmmac_set_dma_operation_mode(priv,
2050                                                                       tc,
2051                                                                       tc,
2052                                                                       chan);
2053                                 else
2054                                         stmmac_set_dma_operation_mode(priv,
2055                                                                     tc,
2056                                                                     SF_DMA_MODE,
2057                                                                     chan);
2058                                 priv->xstats.threshold = tc;
2059                         }
2060                 } else if (unlikely(status == tx_hard_error)) {
2061                         stmmac_tx_err(priv, chan);
2062                 }
2063         }
2064 }
2065
2066 /**
2067  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2068  * @priv: driver private structure
2069  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2070  */
2071 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2072 {
2073         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2074                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2075
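             /* The PTP and MMC register blocks sit at different offsets on
              * GMAC4 than on the older 3.x cores.
              */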
2076         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2077                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2078                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2079         } else {
2080                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2081                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2082         }
2083
2084         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2085
2086         if (priv->dma_cap.rmon) {
2087                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2088                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2089         } else
2090                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2091 }
2092
2093 /**
2094  * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
2095  * @priv: driver private structure
2096  * Description: select the Enhanced/Alternate or Normal descriptors.
2097  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2098  * supported by the HW capability register.
2099  */
2100 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2101 {
2102         if (priv->plat->enh_desc) {
2103                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2104
2105                 /* GMAC older than 3.50 has no extended descriptors */
2106                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2107                         dev_info(priv->device, "Enabled extended descriptors\n");
2108                         priv->extend_desc = 1;
2109                 } else
2110                         dev_warn(priv->device, "Extended descriptors not supported\n");
2111
2112                 priv->hw->desc = &enh_desc_ops;
2113         } else {
2114                 dev_info(priv->device, "Normal descriptors\n");
2115                 priv->hw->desc = &ndesc_ops;
2116         }
2117 }
2118
2119 /**
2120  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2121  * @priv: driver private structure
2122  * Description:
2123  *  new GMAC chip generations have a register to indicate the
2124  *  presence of the optional features/functions.
2125  *  This can also be used to override the values passed through the
2126  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2127  */
2128 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2129 {
2130         u32 ret = 0;
2131
2132         if (priv->hw->dma->get_hw_feature) {
2133                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2134                                               &priv->dma_cap);
2135                 ret = 1;
2136         }
2137
2138         return ret;
2139 }
2140
2141 /**
2142  * stmmac_check_ether_addr - check if the MAC addr is valid
2143  * @priv: driver private structure
2144  * Description:
2145  * it verifies that the MAC address is valid; in case of failure it
2146  * generates a random MAC address
2147  */
2148 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2149 {
2150         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2151                 priv->hw->mac->get_umac_addr(priv->hw,
2152                                              priv->dev->dev_addr, 0);
2153                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2154                         eth_hw_addr_random(priv->dev);
2155                 netdev_info(priv->dev, "device MAC address %pM\n",
2156                             priv->dev->dev_addr);
2157         }
2158 }
2159
2160 /**
2161  * stmmac_init_dma_engine - DMA init.
2162  * @priv: driver private structure
2163  * Description:
2164  * It inits the DMA by invoking the specific MAC/GMAC callback.
2165  * Some DMA parameters can be passed from the platform;
2166  * if these are not passed, a default is kept for the MAC or GMAC.
2167  */
2168 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2169 {
2170         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2171         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2172         struct stmmac_rx_queue *rx_q;
2173         struct stmmac_tx_queue *tx_q;
2174         u32 dummy_dma_rx_phy = 0;
2175         u32 dummy_dma_tx_phy = 0;
2176         u32 chan = 0;
2177         int atds = 0;
2178         int ret = 0;
2179
2180         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2181                 dev_err(priv->device, "Invalid DMA configuration\n");
2182                 return -EINVAL;
2183         }
2184
2185         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2186                 atds = 1;
2187
2188         ret = priv->hw->dma->reset(priv->ioaddr);
2189         if (ret) {
2190                 dev_err(priv->device, "Failed to reset the dma\n");
2191                 return ret;
2192         }
2193
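             /* From core 4.00 onwards each DMA channel is configured
              * individually and the ring tail pointers are programmed;
              * older cores are set up with a single init call.
              */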
2194         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2195                 /* DMA Configuration */
2196                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2197                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2198
2199                 /* DMA RX Channel Configuration */
2200                 for (chan = 0; chan < rx_channels_count; chan++) {
2201                         rx_q = &priv->rx_queue[chan];
2202
2203                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2204                                                     priv->plat->dma_cfg,
2205                                                     rx_q->dma_rx_phy, chan);
2206
2207                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2208                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2209                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2210                                                        rx_q->rx_tail_addr,
2211                                                        chan);
2212                 }
2213
2214                 /* DMA TX Channel Configuration */
2215                 for (chan = 0; chan < tx_channels_count; chan++) {
2216                         tx_q = &priv->tx_queue[chan];
2217
2218                         priv->hw->dma->init_chan(priv->ioaddr,
2219                                                  priv->plat->dma_cfg,
2220                                                  chan);
2221
2222                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2223                                                     priv->plat->dma_cfg,
2224                                                     tx_q->dma_tx_phy, chan);
2225
2226                         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2227                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2228                                                        tx_q->tx_tail_addr,
2229                                                        chan);
2230                 }
2231         } else {
2232                 rx_q = &priv->rx_queue[chan];
2233                 tx_q = &priv->tx_queue[chan];
2234                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2235                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2236         }
2237
2238         if (priv->plat->axi && priv->hw->dma->axi)
2239                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2240
2241         return ret;
2242 }
2243
2244 /**
2245  * stmmac_tx_timer - mitigation sw timer for tx.
2246  * @data: data pointer
2247  * Description:
2248  * This is the timer handler to directly invoke the stmmac_tx_clean.
2249  */
2250 static void stmmac_tx_timer(unsigned long data)
2251 {
2252         struct stmmac_priv *priv = (struct stmmac_priv *)data;
2253         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2254         u32 queue;
2255
2256         /* let's scan all the tx queues */
2257         for (queue = 0; queue < tx_queues_count; queue++)
2258                 stmmac_tx_clean(priv, queue);
2259 }
2260
2261 /**
2262  * stmmac_init_tx_coalesce - init tx mitigation options.
2263  * @priv: driver private structure
2264  * Description:
2265  * This inits the transmit coalesce parameters: i.e. timer rate,
2266  * timer handler and default threshold used for enabling the
2267  * interrupt on completion bit.
2268  */
2269 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2270 {
2271         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2272         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2273         init_timer(&priv->txtimer);
2274         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2275         priv->txtimer.data = (unsigned long)priv;
2276         priv->txtimer.function = stmmac_tx_timer;
2277         add_timer(&priv->txtimer);
2278 }
2279
2280 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2281 {
2282         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2283         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2284         u32 chan;
2285
2286         /* set TX ring length */
2287         if (priv->hw->dma->set_tx_ring_len) {
2288                 for (chan = 0; chan < tx_channels_count; chan++)
2289                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2290                                                        (DMA_TX_SIZE - 1), chan);
2291         }
2292
2293         /* set RX ring length */
2294         if (priv->hw->dma->set_rx_ring_len) {
2295                 for (chan = 0; chan < rx_channels_count; chan++)
2296                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2297                                                        (DMA_RX_SIZE - 1), chan);
2298         }
2299 }
2300
2301 /**
2302  *  stmmac_set_tx_queue_weight - Set TX queue weight
2303  *  @priv: driver private structure
2304  *  Description: It is used for setting the TX queue weights
2305  */
2306 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2307 {
2308         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2309         u32 weight;
2310         u32 queue;
2311
2312         for (queue = 0; queue < tx_queues_count; queue++) {
2313                 weight = priv->plat->tx_queues_cfg[queue].weight;
2314                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2315         }
2316 }
2317
2318 /**
2319  *  stmmac_configure_cbs - Configure CBS in TX queue
2320  *  @priv: driver private structure
2321  *  Description: It is used for configuring CBS in AVB TX queues
2322  */
2323 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2324 {
2325         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2326         u32 mode_to_use;
2327         u32 queue;
2328
2329         /* queue 0 is reserved for legacy traffic */
2330         for (queue = 1; queue < tx_queues_count; queue++) {
2331                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2332                 if (mode_to_use == MTL_QUEUE_DCB)
2333                         continue;
2334
2335                 priv->hw->mac->config_cbs(priv->hw,
2336                                 priv->plat->tx_queues_cfg[queue].send_slope,
2337                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2338                                 priv->plat->tx_queues_cfg[queue].high_credit,
2339                                 priv->plat->tx_queues_cfg[queue].low_credit,
2340                                 queue);
2341         }
2342 }
2343
2344 /**
2345  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2346  *  @priv: driver private structure
2347  *  Description: It is used for mapping RX queues to RX dma channels
2348  */
2349 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2350 {
2351         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2352         u32 queue;
2353         u32 chan;
2354
2355         for (queue = 0; queue < rx_queues_count; queue++) {
2356                 chan = priv->plat->rx_queues_cfg[queue].chan;
2357                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2358         }
2359 }
2360
2361 /**
2362  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2363  *  @priv: driver private structure
2364  *  Description: It is used for configuring the RX Queue Priority
2365  */
2366 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2367 {
2368         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2369         u32 queue;
2370         u32 prio;
2371
2372         for (queue = 0; queue < rx_queues_count; queue++) {
2373                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2374                         continue;
2375
2376                 prio = priv->plat->rx_queues_cfg[queue].prio;
2377                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2378         }
2379 }
2380
2381 /**
2382  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2383  *  @priv: driver private structure
2384  *  Description: It is used for configuring the TX Queue Priority
2385  */
2386 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2387 {
2388         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2389         u32 queue;
2390         u32 prio;
2391
2392         for (queue = 0; queue < tx_queues_count; queue++) {
2393                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2394                         continue;
2395
2396                 prio = priv->plat->tx_queues_cfg[queue].prio;
2397                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2398         }
2399 }
2400
2401 /**
2402  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2403  *  @priv: driver private structure
2404  *  Description: It is used for configuring the RX queue routing
2405  */
2406 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2407 {
2408         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2409         u32 queue;
2410         u8 packet;
2411
2412         for (queue = 0; queue < rx_queues_count; queue++) {
2413                 /* no specific packet type routing specified for the queue */
2414                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2415                         continue;
2416
2417                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2418                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2419         }
2420 }
2421
2422 /**
2423  *  stmmac_mtl_configuration - Configure MTL
2424  *  @priv: driver private structure
2425  *  Description: It is used for configuring the MTL
2426  */
2427 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2428 {
2429         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2430         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2431
2432         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2433                 stmmac_set_tx_queue_weight(priv);
2434
2435         /* Configure MTL RX algorithms */
2436         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2437                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2438                                                 priv->plat->rx_sched_algorithm);
2439
2440         /* Configure MTL TX algorithms */
2441         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2442                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2443                                                 priv->plat->tx_sched_algorithm);
2444
2445         /* Configure CBS in AVB TX queues */
2446         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2447                 stmmac_configure_cbs(priv);
2448
2449         /* Map RX MTL to DMA channels */
2450         if (priv->hw->mac->map_mtl_to_dma)
2451                 stmmac_rx_queue_dma_chan_map(priv);
2452
2453         /* Enable MAC RX Queues */
2454         if (priv->hw->mac->rx_queue_enable)
2455                 stmmac_mac_enable_rx_queues(priv);
2456
2457         /* Set RX priorities */
2458         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2459                 stmmac_mac_config_rx_queues_prio(priv);
2460
2461         /* Set TX priorities */
2462         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2463                 stmmac_mac_config_tx_queues_prio(priv);
2464
2465         /* Set RX routing */
2466         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2467                 stmmac_mac_config_rx_queues_routing(priv);
2468 }
2469
2470 /**
2471  * stmmac_hw_setup - setup mac in a usable state.
2472  *  @dev : pointer to the device structure.
2473  *  Description:
2474  *  this is the main function to set up the HW in a usable state: the
2475  *  dma engine is reset, the core registers are configured (e.g. AXI,
2476  *  Checksum features, timers) and the DMA is ready to start receiving
2477  *  and transmitting.
2478  *  Return value:
2479  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2480  *  file on failure.
2481  */
2482 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2483 {
2484         struct stmmac_priv *priv = netdev_priv(dev);
2485         u32 rx_cnt = priv->plat->rx_queues_to_use;
2486         u32 tx_cnt = priv->plat->tx_queues_to_use;
2487         u32 chan;
2488         int ret;
2489
2490         /* DMA initialization and SW reset */
2491         ret = stmmac_init_dma_engine(priv);
2492         if (ret < 0) {
2493                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2494                            __func__);
2495                 return ret;
2496         }
2497
2498         /* Copy the MAC addr into the HW  */
2499         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2500
2501         /* PS and related bits will be programmed according to the speed */
2502         if (priv->hw->pcs) {
2503                 int speed = priv->plat->mac_port_sel_speed;
2504
2505                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2506                     (speed == SPEED_1000)) {
2507                         priv->hw->ps = speed;
2508                 } else {
2509                         dev_warn(priv->device, "invalid port speed\n");
2510                         priv->hw->ps = 0;
2511                 }
2512         }
2513
2514         /* Initialize the MAC Core */
2515         priv->hw->mac->core_init(priv->hw, dev);
2516
2517         /* Initialize MTL */
2518         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2519                 stmmac_mtl_configuration(priv);
2520
2521         ret = priv->hw->mac->rx_ipc(priv->hw);
2522         if (!ret) {
2523                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2524                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2525                 priv->hw->rx_csum = 0;
2526         }
2527
2528         /* Enable the MAC Rx/Tx */
2529         priv->hw->mac->set_mac(priv->ioaddr, true);
2530
2531         /* Set the HW DMA mode and the COE */
2532         stmmac_dma_operation_mode(priv);
2533
2534         stmmac_mmc_setup(priv);
2535
2536         if (init_ptp) {
2537                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2538                 if (ret < 0)
2539                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2540
2541                 ret = stmmac_init_ptp(priv);
2542                 if (ret == -EOPNOTSUPP)
2543                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2544                 else if (ret)
2545                         netdev_warn(priv->dev, "PTP init failed\n");
2546         }
2547
2548         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2549
2550         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2551                 priv->rx_riwt = MAX_DMA_RIWT;
2552                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2553         }
2554
2555         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2556                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2557
2558         /* set TX and RX rings length */
2559         stmmac_set_rings_length(priv);
2560
2561         /* Enable TSO */
2562         if (priv->tso) {
2563                 for (chan = 0; chan < tx_cnt; chan++)
2564                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2565         }
2566
2567         /* Start the ball rolling... */
2568         stmmac_start_all_dma(priv);
2569
2570         return 0;
2571 }
2572
2573 static void stmmac_hw_teardown(struct net_device *dev)
2574 {
2575         struct stmmac_priv *priv = netdev_priv(dev);
2576
2577         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2578 }
2579
2580 /**
2581  *  stmmac_open - open entry point of the driver
2582  *  @dev : pointer to the device structure.
2583  *  Description:
2584  *  This function is the open entry point of the driver.
2585  *  Return value:
2586  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2587  *  file on failure.
2588  */
2589 static int stmmac_open(struct net_device *dev)
2590 {
2591         struct stmmac_priv *priv = netdev_priv(dev);
2592         int ret;
2593
2594         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2595             priv->hw->pcs != STMMAC_PCS_TBI &&
2596             priv->hw->pcs != STMMAC_PCS_RTBI) {
2597                 ret = stmmac_init_phy(dev);
2598                 if (ret) {
2599                         netdev_err(priv->dev,
2600                                    "%s: Cannot attach to PHY (error: %d)\n",
2601                                    __func__, ret);
2602                         return ret;
2603                 }
2604         }
2605
2606         /* Extra statistics */
2607         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2608         priv->xstats.threshold = tc;
2609
2610         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2611         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2612         priv->mss = 0;
2613
2614         ret = alloc_dma_desc_resources(priv);
2615         if (ret < 0) {
2616                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2617                            __func__);
2618                 goto dma_desc_error;
2619         }
2620
2621         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2622         if (ret < 0) {
2623                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2624                            __func__);
2625                 goto init_error;
2626         }
2627
2628         ret = stmmac_hw_setup(dev, true);
2629         if (ret < 0) {
2630                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2631                 goto init_error;
2632         }
2633
2634         stmmac_init_tx_coalesce(priv);
2635
2636         if (dev->phydev)
2637                 phy_start(dev->phydev);
2638
2639         /* Request the IRQ lines */
2640         ret = request_irq(dev->irq, stmmac_interrupt,
2641                           IRQF_SHARED, dev->name, dev);
2642         if (unlikely(ret < 0)) {
2643                 netdev_err(priv->dev,
2644                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2645                            __func__, dev->irq, ret);
2646                 goto irq_error;
2647         }
2648
2649         /* Request the Wake IRQ in case another line is used for WoL */
2650         if (priv->wol_irq != dev->irq) {
2651                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2652                                   IRQF_SHARED, dev->name, dev);
2653                 if (unlikely(ret < 0)) {
2654                         netdev_err(priv->dev,
2655                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2656                                    __func__, priv->wol_irq, ret);
2657                         goto wolirq_error;
2658                 }
2659         }
2660
2661         /* Request the LPI IRQ in case a separate line is used */
2662         if (priv->lpi_irq > 0) {
2663                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2664                                   dev->name, dev);
2665                 if (unlikely(ret < 0)) {
2666                         netdev_err(priv->dev,
2667                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2668                                    __func__, priv->lpi_irq, ret);
2669                         goto lpiirq_error;
2670                 }
2671         }
2672
2673         stmmac_enable_all_queues(priv);
2674         stmmac_start_all_queues(priv);
2675
2676         return 0;
2677
2678 lpiirq_error:
2679         if (priv->wol_irq != dev->irq)
2680                 free_irq(priv->wol_irq, dev);
2681 wolirq_error:
2682         free_irq(dev->irq, dev);
2683 irq_error:
2684         if (dev->phydev)
2685                 phy_stop(dev->phydev);
2686
2687         del_timer_sync(&priv->txtimer);
2688         stmmac_hw_teardown(dev);
2689 init_error:
2690         free_dma_desc_resources(priv);
2691 dma_desc_error:
2692         if (dev->phydev)
2693                 phy_disconnect(dev->phydev);
2694
2695         return ret;
2696 }
2697
2698 /**
2699  *  stmmac_release - close entry point of the driver
2700  *  @dev : device pointer.
2701  *  Description:
2702  *  This is the stop entry point of the driver.
2703  */
2704 static int stmmac_release(struct net_device *dev)
2705 {
2706         struct stmmac_priv *priv = netdev_priv(dev);
2707
2708         /* Stop and disconnect the PHY */
2709         if (dev->phydev) {
2710                 phy_stop(dev->phydev);
2711                 phy_disconnect(dev->phydev);
2712         }
2713
2714         stmmac_stop_all_queues(priv);
2715
2716         stmmac_disable_all_queues(priv);
2717
2718         del_timer_sync(&priv->txtimer);
2719
2720         /* Free the IRQ lines */
2721         free_irq(dev->irq, dev);
2722         if (priv->wol_irq != dev->irq)
2723                 free_irq(priv->wol_irq, dev);
2724         if (priv->lpi_irq > 0)
2725                 free_irq(priv->lpi_irq, dev);
2726
2727         if (priv->eee_enabled) {
2728                 priv->tx_path_in_lpi_mode = false;
2729                 del_timer_sync(&priv->eee_ctrl_timer);
2730         }
2731
2732         /* Stop TX/RX DMA and clear the descriptors */
2733         stmmac_stop_all_dma(priv);
2734
2735         /* Release and free the Rx/Tx resources */
2736         free_dma_desc_resources(priv);
2737
2738         /* Disable the MAC Rx/Tx */
2739         priv->hw->mac->set_mac(priv->ioaddr, false);
2740
2741         netif_carrier_off(dev);
2742
2743         stmmac_release_ptp(priv);
2744
2745         return 0;
2746 }
2747
2748 /**
2749  *  stmmac_tso_allocator - fill the TX descriptors for a TSO payload
2750  *  @priv: driver private structure
2751  *  @des: buffer start address
2752  *  @total_len: total length to fill in descriptors
2753  *  @last_segment: condition for the last descriptor
2754  *  @queue: TX queue index
2755  *  Description:
2756  *  This function fills the descriptors and requests new descriptors
2757  *  according to the buffer length to fill
2758  */
2759 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2760                                  int total_len, bool last_segment, u32 queue)
2761 {
2762         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2763         struct dma_desc *desc;
2764         u32 buff_size;
2765         int tmp_len;
2766
2767         tmp_len = total_len;
2768
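             /* Split the payload into chunks of at most TSO_MAX_BUFF_SIZE
              * bytes, one descriptor per chunk; only the descriptor holding
              * the final chunk carries the Last Segment indication.
              */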
2769         while (tmp_len > 0) {
2770                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2771                 desc = tx_q->dma_tx + tx_q->cur_tx;
2772
2773                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2774                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2775                             TSO_MAX_BUFF_SIZE : tmp_len;
2776
2777                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2778                         0, 1,
2779                         (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2780                         0, 0);
2781
2782                 tmp_len -= TSO_MAX_BUFF_SIZE;
2783         }
2784 }
2785
2786 /**
2787  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2788  *  @skb : the socket buffer
2789  *  @dev : device pointer
2790  *  Description: this is the transmit function that is called on TSO frames
2791  *  (support available on GMAC4 and newer chips).
2792  *  The diagram below shows the ring programming in case of TSO frames:
2793  *
2794  *  First Descriptor
2795  *   --------
2796  *   | DES0 |---> buffer1 = L2/L3/L4 header
2797  *   | DES1 |---> TCP Payload (can continue on next descr...)
2798  *   | DES2 |---> buffer 1 and 2 len
2799  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2800  *   --------
2801  *      |
2802  *     ...
2803  *      |
2804  *   --------
2805  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2806  *   | DES1 | --|
2807  *   | DES2 | --> buffer 1 and 2 len
2808  *   | DES3 |
2809  *   --------
2810  *
2811  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is set only when it changes.
2812  */
2813 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2814 {
2815         struct dma_desc *desc, *first, *mss_desc = NULL;
2816         struct stmmac_priv *priv = netdev_priv(dev);
2817         int nfrags = skb_shinfo(skb)->nr_frags;
2818         u32 queue = skb_get_queue_mapping(skb);
2819         unsigned int first_entry, des;
2820         struct stmmac_tx_queue *tx_q;
2821         int tmp_pay_len = 0;
2822         u32 pay_len, mss;
2823         u8 proto_hdr_len;
2824         int i;
2825
2826         tx_q = &priv->tx_queue[queue];
2827
2828         /* Compute header lengths */
2829         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2830
2831         /* Desc availability based on the threshold should be safe enough */
2832         if (unlikely(stmmac_tx_avail(priv, queue) <
2833                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2834                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2835                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2836                                                                 queue));
2837                         /* This is a hard error, log it. */
2838                         netdev_err(priv->dev,
2839                                    "%s: Tx Ring full when queue awake\n",
2840                                    __func__);
2841                 }
2842                 return NETDEV_TX_BUSY;
2843         }
2844
2845         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2846
2847         mss = skb_shinfo(skb)->gso_size;
2848
2849         /* set new MSS value if needed: program it via a context descriptor */
2850         if (mss != priv->mss) {
2851                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2852                 priv->hw->desc->set_mss(mss_desc, mss);
2853                 priv->mss = mss;
2854                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2855         }
2856
2857         if (netif_msg_tx_queued(priv)) {
2858                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2859                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2860                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2861                         skb->data_len);
2862         }
2863
2864         first_entry = tx_q->cur_tx;
2865
2866         desc = tx_q->dma_tx + first_entry;
2867         first = desc;
2868
2869         /* first descriptor: fill Headers on Buf1 */
2870         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2871                              DMA_TO_DEVICE);
2872         if (dma_mapping_error(priv->device, des))
2873                 goto dma_map_err;
2874
2875         tx_q->tx_skbuff_dma[first_entry].buf = des;
2876         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2877
2878         first->des0 = cpu_to_le32(des);
2879
2880         /* Fill start of payload in buff2 of first descriptor */
2881         if (pay_len)
2882                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2883
2884         /* If needed take extra descriptors to fill the remaining payload */
2885         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2886
2887         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2888
2889         /* Prepare fragments */
2890         for (i = 0; i < nfrags; i++) {
2891                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2892
2893                 des = skb_frag_dma_map(priv->device, frag, 0,
2894                                        skb_frag_size(frag),
2895                                        DMA_TO_DEVICE);
2896                 if (dma_mapping_error(priv->device, des))
2897                         goto dma_map_err;
2898
2899                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2900                                      (i == nfrags - 1), queue);
2901
2902                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2903                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2904                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2905                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2906         }
2907
2908         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2909
2910         /* Only the last descriptor gets to point to the skb. */
2911         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2912
2913         /* We've used all descriptors we need for this skb, however,
2914          * advance cur_tx so that it references a fresh descriptor.
2915          * ndo_start_xmit will fill this descriptor the next time it's
2916          * called and stmmac_tx_clean may clean up to this descriptor.
2917          */
2918         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2919
2920         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2921                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2922                           __func__);
2923                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2924         }
2925
2926         dev->stats.tx_bytes += skb->len;
2927         priv->xstats.tx_tso_frames++;
2928         priv->xstats.tx_tso_nfrags += nfrags;
2929
2930         /* Manage tx mitigation */
2931         priv->tx_count_frames += nfrags + 1;
2932         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2933                 mod_timer(&priv->txtimer,
2934                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2935         } else {
2936                 priv->tx_count_frames = 0;
2937                 priv->hw->desc->set_tx_ic(desc);
2938                 priv->xstats.tx_set_ic_bit++;
2939         }
2940
2941         skb_tx_timestamp(skb);
2942
2943         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2944                      priv->hwts_tx_en)) {
2945                 /* declare that device is doing timestamping */
2946                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2947                 priv->hw->desc->enable_tx_timestamp(first);
2948         }
2949
2950         /* Complete the first descriptor before granting the DMA */
2951         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2952                         proto_hdr_len,
2953                         pay_len,
2954                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2955                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2956
2957         /* If context desc is used to change MSS */
2958         if (mss_desc) {
2959                 /* Make sure that the first descriptor has been completely
2960                  * written, including its OWN bit. This is because the MSS
2961                  * descriptor actually comes before the first descriptor, so
2962                  * the MSS descriptor's OWN bit must be written last.
2963                  */
2964                 dma_wmb();
2965                 priv->hw->desc->set_tx_owner(mss_desc);
2966         }
2967
2968         /* The OWN bit must be the latest setting done when preparing the
2969          * descriptor, and then a barrier is needed to make sure that
2970          * all is coherent before granting the DMA engine.
2971          */
2972         dma_wmb();
2973
2974         if (netif_msg_pktdata(priv)) {
2975                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2976                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2977                         tx_q->cur_tx, first, nfrags);
2978
2979                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2980                                              0);
2981
2982                 pr_info(">>> frame to be transmitted: ");
2983                 print_pkt(skb->data, skb_headlen(skb));
2984         }
2985
2986         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2987
2988         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2989         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2990                                        queue);
2991
2992         return NETDEV_TX_OK;
2993
2994 dma_map_err:
2995         dev_err(priv->device, "Tx dma map failed\n");
2996         dev_kfree_skb(skb);
2997         priv->dev->stats.tx_dropped++;
2998         return NETDEV_TX_OK;
2999 }
3000
3001 /**
3002  *  stmmac_xmit - Tx entry point of the driver
3003  *  @skb : the socket buffer
3004  *  @dev : device pointer
3005  *  Description : this is the tx entry point of the driver.
3006  *  It programs the chain or the ring and supports oversized frames
3007  *  and SG feature.
3008  */
3009 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3010 {
3011         struct stmmac_priv *priv = netdev_priv(dev);
3012         unsigned int nopaged_len = skb_headlen(skb);
3013         int i, csum_insertion = 0, is_jumbo = 0;
3014         u32 queue = skb_get_queue_mapping(skb);
3015         int nfrags = skb_shinfo(skb)->nr_frags;
3016         int entry;
3017         unsigned int first_entry;
3018         struct dma_desc *desc, *first;
3019         struct stmmac_tx_queue *tx_q;
3020         unsigned int enh_desc;
3021         unsigned int des;
3022
3023         tx_q = &priv->tx_queue[queue];
3024
3025         if (priv->tx_path_in_lpi_mode)
3026                 stmmac_disable_eee_mode(priv);
3027
3028         /* Manage oversized TCP frames for GMAC4 device */
3029         if (skb_is_gso(skb) && priv->tso) {
3030                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3031                         /*
3032                          * There is no way to determine the number of TSO
3033                          * capable queues. Let's always use queue 0
3034                          * because if TSO is supported then at least this
3035                          * one will be capable.
3036                          */
3037                         skb_set_queue_mapping(skb, 0);
3038
3039                         return stmmac_tso_xmit(skb, dev);
3040                 }
3041         }
3042
3043         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3044                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3045                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3046                                                                 queue));
3047                         /* This is a hard error, log it. */
3048                         netdev_err(priv->dev,
3049                                    "%s: Tx Ring full when queue awake\n",
3050                                    __func__);
3051                 }
3052                 return NETDEV_TX_BUSY;
3053         }
3054
3055         entry = tx_q->cur_tx;
3056         first_entry = entry;
3057
3058         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3059
3060         if (likely(priv->extend_desc))
3061                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3062         else
3063                 desc = tx_q->dma_tx + entry;
3064
3065         first = desc;
3066
3067         enh_desc = priv->plat->enh_desc;
3068         /* To program the descriptors according to the size of the frame */
3069         if (enh_desc)
3070                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3071
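        /* On pre-4.00 cores an oversized linear buffer is handed to the
         * ring/chain mode jumbo helper, which may use more than one
         * descriptor for the head of the frame.
         */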
3072         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3073                                          DWMAC_CORE_4_00)) {
3074                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3075                 if (unlikely(entry < 0))
3076                         goto dma_map_err;
3077         }
3078
3079         for (i = 0; i < nfrags; i++) {
3080                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3081                 int len = skb_frag_size(frag);
3082                 bool last_segment = (i == (nfrags - 1));
3083
3084                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3085
3086                 if (likely(priv->extend_desc))
3087                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3088                 else
3089                         desc = tx_q->dma_tx + entry;
3090
3091                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3092                                        DMA_TO_DEVICE);
3093                 if (dma_mapping_error(priv->device, des))
3094                         goto dma_map_err; /* should reuse desc w/o issues */
3095
3096                 tx_q->tx_skbuff[entry] = NULL;
3097
3098                 tx_q->tx_skbuff_dma[entry].buf = des;
3099                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3100                         desc->des0 = cpu_to_le32(des);
3101                 else
3102                         desc->des2 = cpu_to_le32(des);
3103
3104                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3105                 tx_q->tx_skbuff_dma[entry].len = len;
3106                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3107
3108                 /* Prepare the descriptor and set the own bit too */
3109                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3110                                                 priv->mode, 1, last_segment,
3111                                                 skb->len);
3112         }
3113
3114         /* Only the last descriptor gets to point to the skb. */
3115         tx_q->tx_skbuff[entry] = skb;
3116
3117         /* We've used all descriptors we need for this skb, however,
3118          * advance cur_tx so that it references a fresh descriptor.
3119          * ndo_start_xmit will fill this descriptor the next time it's
3120          * called and stmmac_tx_clean may clean up to this descriptor.
3121          */
3122         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3123         tx_q->cur_tx = entry;
3124
3125         if (netif_msg_pktdata(priv)) {
3126                 void *tx_head;
3127
3128                 netdev_dbg(priv->dev,
3129                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3130                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3131                            entry, first, nfrags);
3132
3133                 if (priv->extend_desc)
3134                         tx_head = (void *)tx_q->dma_etx;
3135                 else
3136                         tx_head = (void *)tx_q->dma_tx;
3137
3138                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3139
3140                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3141                 print_pkt(skb->data, skb->len);
3142         }
3143
3144         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3145                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3146                           __func__);
3147                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3148         }
3149
3150         dev->stats.tx_bytes += skb->len;
3151
3152         /* According to the coalesce parameter, the IC bit for the latest
3153          * segment is reset and the timer re-started to clean the tx status.
3154          * This approach takes care of the fragments: desc is the first
3155          * element in case of no SG.
3156          */
3157         priv->tx_count_frames += nfrags + 1;
3158         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3159                 mod_timer(&priv->txtimer,
3160                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3161         } else {
3162                 priv->tx_count_frames = 0;
3163                 priv->hw->desc->set_tx_ic(desc);
3164                 priv->xstats.tx_set_ic_bit++;
3165         }
3166
3167         skb_tx_timestamp(skb);
3168
3169         /* Ready to fill the first descriptor and set the OWN bit w/o any
3170          * problems because all the descriptors are actually ready to be
3171          * passed to the DMA engine.
3172          */
3173         if (likely(!is_jumbo)) {
3174                 bool last_segment = (nfrags == 0);
3175
3176                 des = dma_map_single(priv->device, skb->data,
3177                                      nopaged_len, DMA_TO_DEVICE);
3178                 if (dma_mapping_error(priv->device, des))
3179                         goto dma_map_err;
3180
3181                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3182                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3183                         first->des0 = cpu_to_le32(des);
3184                 else
3185                         first->des2 = cpu_to_le32(des);
3186
3187                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3188                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3189
3190                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3191                              priv->hwts_tx_en)) {
3192                         /* declare that device is doing timestamping */
3193                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3194                         priv->hw->desc->enable_tx_timestamp(first);
3195                 }
3196
3197                 /* Prepare the first descriptor setting the OWN bit too */
3198                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3199                                                 csum_insertion, priv->mode, 1,
3200                                                 last_segment, skb->len);
3201
3202                 /* The OWN bit must be the latest setting done when preparing the
3203                  * descriptor, and then a barrier is needed to make sure that
3204                  * all is coherent before granting the DMA engine.
3205                  */
3206                 dma_wmb();
3207         }
3208
3209         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3210
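        /* Notify the DMA engine: pre-4.00 cores go through
         * enable_dma_transmission(), newer cores pick up the new
         * descriptors from the updated TX tail pointer.
         */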
3211         if (priv->synopsys_id < DWMAC_CORE_4_00)
3212                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3213         else {
3214                 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3215                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3216                                                queue);
3217         }
3218
3219         return NETDEV_TX_OK;
3220
3221 dma_map_err:
3222         netdev_err(priv->dev, "Tx DMA map failed\n");
3223         dev_kfree_skb(skb);
3224         priv->dev->stats.tx_dropped++;
3225         return NETDEV_TX_OK;
3226 }
3227
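/**
 * stmmac_rx_vlan - strip the VLAN tag from a received frame
 * @dev: device pointer
 * @skb: the socket buffer
 * Description: if HW VLAN RX offloading is enabled and the frame carries a
 * 802.1Q tag, pop the tag from the packet data and record it in the skb so
 * that the stack sees an untagged frame.
 */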
3228 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3229 {
3230         struct ethhdr *ehdr;
3231         u16 vlanid;
3232
3233         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3234             NETIF_F_HW_VLAN_CTAG_RX &&
3235             !__vlan_get_tag(skb, &vlanid)) {
3236                 /* pop the vlan tag */
3237                 ehdr = (struct ethhdr *)skb->data;
3238                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3239                 skb_pull(skb, VLAN_HLEN);
3240                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3241         }
3242 }
3243
3244
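/* Returns 1 once the zero-copy threshold for this RX queue has been
 * reached; stmmac_rx() then copies frames into freshly allocated skbs
 * instead of handing the preallocated ring buffers to the stack.
 */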
3245 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3246 {
3247         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3248                 return 0;
3249
3250         return 1;
3251 }
3252
3253 /**
3254  * stmmac_rx_refill - refill used skb preallocated buffers
3255  * @priv: driver private structure
3256  * @queue: RX queue index
3257  * Description : this is to reallocate the skb for the reception process
3258  * that is based on zero-copy.
3259  */
3260 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3261 {
3262         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3263         int dirty = stmmac_rx_dirty(priv, queue);
3264         unsigned int entry = rx_q->dirty_rx;
3265
3266         int bfsize = priv->dma_buf_sz;
3267
3268         while (dirty-- > 0) {
3269                 struct dma_desc *p;
3270
3271                 if (priv->extend_desc)
3272                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3273                 else
3274                         p = rx_q->dma_rx + entry;
3275
3276                 if (likely(!rx_q->rx_skbuff[entry])) {
3277                         struct sk_buff *skb;
3278
3279                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3280                         if (unlikely(!skb)) {
3281                                 /* so for a while no zero-copy! */
3282                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3283                                 if (unlikely(net_ratelimit()))
3284                                         dev_err(priv->device,
3285                                                 "fail to alloc skb entry %d\n",
3286                                                 entry);
3287                                 break;
3288                         }
3289
3290                         rx_q->rx_skbuff[entry] = skb;
3291                         rx_q->rx_skbuff_dma[entry] =
3292                             dma_map_single(priv->device, skb->data, bfsize,
3293                                            DMA_FROM_DEVICE);
3294                         if (dma_mapping_error(priv->device,
3295                                               rx_q->rx_skbuff_dma[entry])) {
3296                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3297                                 dev_kfree_skb(skb);
3298                                 break;
3299                         }
3300
3301                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3302                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3303                                 p->des1 = 0;
3304                         } else {
3305                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3306                         }
3307                         if (priv->hw->mode->refill_desc3)
3308                                 priv->hw->mode->refill_desc3(rx_q, p);
3309
3310                         if (rx_q->rx_zeroc_thresh > 0)
3311                                 rx_q->rx_zeroc_thresh--;
3312
3313                         netif_dbg(priv, rx_status, priv->dev,
3314                                   "refill entry #%d\n", entry);
3315                 }
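                /* Publish the new buffer before handing the descriptor back
                 * to the hardware: GMAC4 cores re-init the whole descriptor,
                 * older ones only need the OWN bit set again.
                 */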
3316                 dma_wmb();
3317
3318                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3319                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0, priv->dma_buf_sz);
3320                 else
3321                         priv->hw->desc->set_rx_owner(p);
3322
3323                 dma_wmb();
3324
3325                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3326         }
3327         rx_q->dirty_rx = entry;
3328 }
3329
3330 /**
3331  * stmmac_rx - manage the receive process
3332  * @priv: driver private structure
3333  * @limit: napi budget
3334  * @queue: RX queue index.
3335  * Description : this is the function called by the napi poll method.
3336  * It gets all the frames inside the ring.
3337  */
3338 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3339 {
3340         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3341         int coe = priv->hw->rx_csum;
3342         unsigned int next_entry = rx_q->cur_rx;
3343         unsigned int count = 0;
3344
3345         if (netif_msg_rx_status(priv)) {
3346                 void *rx_head;
3347
3348                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3349                 if (priv->extend_desc)
3350                         rx_head = (void *)rx_q->dma_erx;
3351                 else
3352                         rx_head = (void *)rx_q->dma_rx;
3353
3354                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3355         }
3356         while (count < limit) {
3357                 int entry, status;
3358                 struct dma_desc *p;
3359                 struct dma_desc *np;
3360
3361                 entry = next_entry;
3362
3363                 if (priv->extend_desc)
3364                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3365                 else
3366                         p = rx_q->dma_rx + entry;
3367
3368                 /* read the status of the incoming frame */
3369                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3370                                                    &priv->xstats, p);
3371                 /* check if managed by the DMA otherwise go ahead */
3372                 if (unlikely(status & dma_own))
3373                         break;
3374
3375                 count++;
3376
3377                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3378                 next_entry = rx_q->cur_rx;
3379
3380                 if (priv->extend_desc)
3381                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3382                 else
3383                         np = rx_q->dma_rx + next_entry;
3384
3385                 prefetch(np);
3386
3387                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3388                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3389                                                            &priv->xstats,
3390                                                            rx_q->dma_erx +
3391                                                            entry);
3392                 if (unlikely(status == discard_frame)) {
3393                         priv->dev->stats.rx_errors++;
3394                         if (priv->hwts_rx_en && !priv->extend_desc) {
3395                                 /* DESC2 & DESC3 will be overwritten by the
3396                                  * device with the timestamp value, hence
3397                                  * reinitialize them in stmmac_rx_refill() so
3398                                  * that the device can reuse them.
3399                                  */
3400                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3401                                 rx_q->rx_skbuff[entry] = NULL;
3402                                 dma_unmap_single(priv->device,
3403                                                  rx_q->rx_skbuff_dma[entry],
3404                                                  priv->dma_buf_sz,
3405                                                  DMA_FROM_DEVICE);
3406                         }
3407                 } else {
3408                         struct sk_buff *skb;
3409                         int frame_len;
3410                         unsigned int des;
3411
3412                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3413                                 des = le32_to_cpu(p->des0);
3414                         else
3415                                 des = le32_to_cpu(p->des2);
3416
3417                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3418
3419                         /*  If frame length is greater than skb buffer size
3420                          *  (preallocated during init) then the packet is
3421                          *  ignored
3422                          */
3423                         if (frame_len > priv->dma_buf_sz) {
3424                                 if (net_ratelimit())
3425                                         netdev_err(priv->dev,
3426                                                    "len %d larger than size (%d)\n",
3427                                                    frame_len, priv->dma_buf_sz);
3428                                 priv->dev->stats.rx_length_errors++;
3429                                 continue;
3430                         }
3431
3432                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3433                          * Type frames (LLC/LLC-SNAP)
3434                          *
3435                          * llc_snap is never checked in GMAC >= 4, so this ACS
3436                          * feature is always disabled and the FCS needs to
3437                          * be stripped manually.
3438                          */
3439                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3440                             unlikely(status != llc_snap))
3441                                 frame_len -= ETH_FCS_LEN;
3442
3443                         if (netif_msg_rx_status(priv)) {
3444                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3445                                            p, entry, des);
3446                                 if (frame_len > ETH_FRAME_LEN)
3447                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3448                                                    frame_len, status);
3449                         }
3450
3451                         /* Zero-copy is always used, for all sizes, on
3452                          * GMAC4 because the used descriptors always need
3453                          * to be refilled.
3454                          */
3455                         if (unlikely(!priv->plat->has_gmac4 &&
3456                                      ((frame_len < priv->rx_copybreak) ||
3457                                      stmmac_rx_threshold_count(rx_q)))) {
3458                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3459                                                                 frame_len);
3460                                 if (unlikely(!skb)) {
3461                                         if (net_ratelimit())
3462                                                 dev_warn(priv->device,
3463                                                          "packet dropped\n");
3464                                         priv->dev->stats.rx_dropped++;
3465                                         continue;
3466                                 }
3467
3468                                 dma_sync_single_for_cpu(priv->device,
3469                                                         rx_q->rx_skbuff_dma
3470                                                         [entry], frame_len,
3471                                                         DMA_FROM_DEVICE);
3472                                 skb_copy_to_linear_data(skb,
3473                                                         rx_q->
3474                                                         rx_skbuff[entry]->data,
3475                                                         frame_len);
3476
3477                                 skb_put(skb, frame_len);
3478                                 dma_sync_single_for_device(priv->device,
3479                                                            rx_q->rx_skbuff_dma
3480                                                            [entry], frame_len,
3481                                                            DMA_FROM_DEVICE);
3482                         } else {
3483                                 skb = rx_q->rx_skbuff[entry];
3484                                 if (unlikely(!skb)) {
3485                                         if (net_ratelimit())
3486                                                 netdev_err(priv->dev,
3487                                                            "%s: Inconsistent Rx chain\n",
3488                                                            priv->dev->name);
3489                                         priv->dev->stats.rx_dropped++;
3490                                         continue;
3491                                 }
3492                                 prefetch(skb->data - NET_IP_ALIGN);
3493                                 rx_q->rx_skbuff[entry] = NULL;
3494                                 rx_q->rx_zeroc_thresh++;
3495
3496                                 skb_put(skb, frame_len);
3497                                 dma_unmap_single(priv->device,
3498                                                  rx_q->rx_skbuff_dma[entry],
3499                                                  priv->dma_buf_sz,
3500                                                  DMA_FROM_DEVICE);
3501                         }
3502
3503                         if (netif_msg_pktdata(priv)) {
3504                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3505                                            frame_len);
3506                                 print_pkt(skb->data, frame_len);
3507                         }
3508
3509                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3510
3511                         stmmac_rx_vlan(priv->dev, skb);
3512
3513                         skb->protocol = eth_type_trans(skb, priv->dev);
3514
3515                         if (unlikely(!coe))
3516                                 skb_checksum_none_assert(skb);
3517                         else
3518                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3519
3520                         napi_gro_receive(&rx_q->napi, skb);
3521
3522                         priv->dev->stats.rx_packets++;
3523                         priv->dev->stats.rx_bytes += frame_len;
3524                 }
3525         }
3526
3527         stmmac_rx_refill(priv, queue);
3528
3529         priv->xstats.rx_pkt_n += count;
3530
3531         return count;
3532 }
3533
3534 /**
3535  *  stmmac_poll - stmmac poll method (NAPI)
3536  *  @napi : pointer to the napi structure.
3537  *  @budget : maximum number of packets that the current CPU can receive from
3538  *            all interfaces.
3539  *  Description :
3540  *  To look at the incoming frames and clear the tx resources.
3541  */
3542 static int stmmac_poll(struct napi_struct *napi, int budget)
3543 {
3544         struct stmmac_rx_queue *rx_q =
3545                 container_of(napi, struct stmmac_rx_queue, napi);
3546         struct stmmac_priv *priv = rx_q->priv_data;
3547         u32 tx_count = priv->plat->tx_queues_to_use;
3548         u32 chan = rx_q->queue_index;
3549         int work_done = 0;
3550         u32 queue;
3551
3552         priv->xstats.napi_poll++;
3553
3554         /* Clean the Tx path of all the queues from this NAPI context */
3555         for (queue = 0; queue < tx_count; queue++)
3556                 stmmac_tx_clean(priv, queue);
3557
3558         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3559         if (work_done < budget) {
3560                 napi_complete_done(napi, work_done);
3561                 stmmac_enable_dma_irq(priv, chan);
3562         }
3563         return work_done;
3564 }
3565
3566 /**
3567  *  stmmac_tx_timeout
3568  *  @dev : Pointer to net device structure
3569  *  Description: this function is called when a packet transmission fails to
3570  *   complete within a reasonable time. The driver will mark the error in the
3571  *   netdev structure and arrange for the device to be reset to a sane state
3572  *   in order to transmit a new packet.
3573  */
3574 static void stmmac_tx_timeout(struct net_device *dev)
3575 {
3576         struct stmmac_priv *priv = netdev_priv(dev);
3577         u32 tx_count = priv->plat->tx_queues_to_use;
3578         u32 chan;
3579
3580         /* Clear Tx resources and restart transmission */
3581         for (chan = 0; chan < tx_count; chan++)
3582                 stmmac_tx_err(priv, chan);
3583 }
3584
3585 /**
3586  *  stmmac_set_rx_mode - entry point for multicast addressing
3587  *  @dev : pointer to the device structure
3588  *  Description:
3589  *  This function is a driver entry point which gets called by the kernel
3590  *  whenever multicast addresses must be enabled/disabled.
3591  *  Return value:
3592  *  void.
3593  */
3594 static void stmmac_set_rx_mode(struct net_device *dev)
3595 {
3596         struct stmmac_priv *priv = netdev_priv(dev);
3597
3598         priv->hw->mac->set_filter(priv->hw, dev);
3599 }
3600
3601 /**
3602  *  stmmac_change_mtu - entry point to change MTU size for the device.
3603  *  @dev : device pointer.
3604  *  @new_mtu : the new MTU size for the device.
3605  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3606  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3607  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3608  *  Return value:
3609  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3610  *  file on failure.
3611  */
3612 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3613 {
3614         struct stmmac_priv *priv = netdev_priv(dev);
3615         int txfifosz = priv->plat->tx_fifo_size;
3616         const int mtu = new_mtu;
3617
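        /* If the platform did not provide a TX FIFO size, use the value
         * from the HW capabilities; the FIFO is shared evenly across the
         * TX queues and each share must fit at least one MTU-sized frame.
         */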
3618         if (txfifosz == 0)
3619                 txfifosz = priv->dma_cap.tx_fifo_size;
3620
3621         txfifosz /= priv->plat->tx_queues_to_use;
3622
3623         if (netif_running(dev)) {
3624                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3625                 return -EBUSY;
3626         }
3627
3628         new_mtu = STMMAC_ALIGN(new_mtu);
3629
3630         /* If condition true, FIFO is too small or MTU too large */
3631         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3632                 return -EINVAL;
3633
3634         dev->mtu = mtu;
3635
3636         netdev_update_features(dev);
3637
3638         return 0;
3639 }
3640
3641 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3642                                              netdev_features_t features)
3643 {
3644         struct stmmac_priv *priv = netdev_priv(dev);
3645
3646         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3647                 features &= ~NETIF_F_RXCSUM;
3648
3649         if (!priv->plat->tx_coe)
3650                 features &= ~NETIF_F_CSUM_MASK;
3651
3652         /* Some GMAC devices have buggy Jumbo frame support that
3653          * needs the Tx COE to be disabled for oversized frames
3654          * (due to limited buffer sizes). In this case we disable
3655          * the TX csum insertion in the TDES and do not use SF.
3656          */
3657         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3658                 features &= ~NETIF_F_CSUM_MASK;
3659
3660         /* Disable tso if asked by ethtool */
3661         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3662                 if (features & NETIF_F_TSO)
3663                         priv->tso = true;
3664                 else
3665                         priv->tso = false;
3666         }
3667
3668         return features;
3669 }
3670
3671 static int stmmac_set_features(struct net_device *netdev,
3672                                netdev_features_t features)
3673 {
3674         struct stmmac_priv *priv = netdev_priv(netdev);
3675
3676         /* Keep the COE Type in case checksum offload is supported */
3677         if (features & NETIF_F_RXCSUM)
3678                 priv->hw->rx_csum = priv->plat->rx_coe;
3679         else
3680                 priv->hw->rx_csum = 0;
3681         /* No check is needed because rx_coe has been set before and it
3682          * will be fixed in case of issue.
3683          */
3684         priv->hw->mac->rx_ipc(priv->hw);
3685
3686         return 0;
3687 }
3688
3689 /**
3690  *  stmmac_interrupt - main ISR
3691  *  @irq: interrupt number.
3692  *  @dev_id: to pass the net device pointer.
3693  *  Description: this is the main driver interrupt service routine.
3694  *  It can call:
3695  *  o DMA service routine (to manage incoming frame reception and transmission
3696  *    status)
3697  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3698  *    interrupts.
3699  */
3700 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3701 {
3702         struct net_device *dev = (struct net_device *)dev_id;
3703         struct stmmac_priv *priv = netdev_priv(dev);
3704         u32 rx_cnt = priv->plat->rx_queues_to_use;
3705         u32 tx_cnt = priv->plat->tx_queues_to_use;
3706         u32 queues_count;
3707         u32 queue;
3708
3709         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3710
3711         if (priv->irq_wake)
3712                 pm_wakeup_event(priv->device, 0);
3713
3714         if (unlikely(!dev)) {
3715                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3716                 return IRQ_NONE;
3717         }
3718
3719         /* To handle GMAC own interrupts */
3720         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3721                 int status = priv->hw->mac->host_irq_status(priv->hw,
3722                                                             &priv->xstats);
3723
3724                 if (unlikely(status)) {
3725                         /* For LPI we need to save the tx status */
3726                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3727                                 priv->tx_path_in_lpi_mode = true;
3728                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3729                                 priv->tx_path_in_lpi_mode = false;
3730                 }
3731
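                /* GMAC4 and newer cores also report per-queue MTL events: if
                 * an RX FIFO overflow is signalled, rewrite the RX tail
                 * pointer so the DMA can resume filling that queue.
                 */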
3732                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3733                         for (queue = 0; queue < queues_count; queue++) {
3734                                 struct stmmac_rx_queue *rx_q =
3735                                 &priv->rx_queue[queue];
3736
3737                                 status |=
3738                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3739                                                                    queue);
3740
3741                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3742                                     priv->hw->dma->set_rx_tail_ptr)
3743                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3744                                                                 rx_q->rx_tail_addr,
3745                                                                 queue);
3746                         }
3747                 }
3748
3749                 /* PCS link status */
3750                 if (priv->hw->pcs) {
3751                         if (priv->xstats.pcs_link)
3752                                 netif_carrier_on(dev);
3753                         else
3754                                 netif_carrier_off(dev);
3755                 }
3756         }
3757
3758         /* To handle DMA interrupts */
3759         stmmac_dma_interrupt(priv);
3760
3761         return IRQ_HANDLED;
3762 }
3763
3764 #ifdef CONFIG_NET_POLL_CONTROLLER
3765 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3766  * to allow network I/O with interrupts disabled.
3767  */
3768 static void stmmac_poll_controller(struct net_device *dev)
3769 {
3770         disable_irq(dev->irq);
3771         stmmac_interrupt(dev->irq, dev);
3772         enable_irq(dev->irq);
3773 }
3774 #endif
3775
3776 /**
3777  *  stmmac_ioctl - Entry point for the Ioctl
3778  *  @dev: Device pointer.
3779  *  @rq: An IOCTL specific structure, that can contain a pointer to
3780  *  a proprietary structure used to pass information to the driver.
3781  *  @cmd: IOCTL command
3782  *  Description:
3783  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3784  */
3785 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3786 {
3787         int ret = -EOPNOTSUPP;
3788
3789         if (!netif_running(dev))
3790                 return -EINVAL;
3791
3792         switch (cmd) {
3793         case SIOCGMIIPHY:
3794         case SIOCGMIIREG:
3795         case SIOCSMIIREG:
3796                 if (!dev->phydev)
3797                         return -EINVAL;
3798                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3799                 break;
3800         case SIOCSHWTSTAMP:
3801                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3802                 break;
3803         default:
3804                 break;
3805         }
3806
3807         return ret;
3808 }
3809
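/* Set a new MAC address: update the netdev copy first, then program the
 * primary unicast address filter (index 0) in the MAC.
 */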
3810 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3811 {
3812         struct stmmac_priv *priv = netdev_priv(ndev);
3813         int ret = 0;
3814
3815         ret = eth_mac_addr(ndev, addr);
3816         if (ret)
3817                 return ret;
3818
3819         priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3820
3821         return ret;
3822 }
3823
3824 #ifdef CONFIG_DEBUG_FS
3825 static struct dentry *stmmac_fs_dir;
3826
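/* Dump an RX/TX descriptor ring (basic or extended layout) to the seq_file
 * backing the debugfs "descriptors_status" entry.
 */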
3827 static void sysfs_display_ring(void *head, int size, int extend_desc,
3828                                struct seq_file *seq)
3829 {
3830         int i;
3831         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3832         struct dma_desc *p = (struct dma_desc *)head;
3833
3834         for (i = 0; i < size; i++) {
3835                 if (extend_desc) {
3836                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3837                                    i, (unsigned int)virt_to_phys(ep),
3838                                    le32_to_cpu(ep->basic.des0),
3839                                    le32_to_cpu(ep->basic.des1),
3840                                    le32_to_cpu(ep->basic.des2),
3841                                    le32_to_cpu(ep->basic.des3));
3842                         ep++;
3843                 } else {
3844                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3845                                    i, (unsigned int)virt_to_phys(p),
3846                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3847                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3848                         p++;
3849                 }
3850                 seq_printf(seq, "\n");
3851         }
3852 }
3853
3854 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3855 {
3856         struct net_device *dev = seq->private;
3857         struct stmmac_priv *priv = netdev_priv(dev);
3858         u32 rx_count = priv->plat->rx_queues_to_use;
3859         u32 tx_count = priv->plat->tx_queues_to_use;
3860         u32 queue;
3861
3862         if ((dev->flags & IFF_UP) == 0)
3863                 return 0;
3864
3865         for (queue = 0; queue < rx_count; queue++) {
3866                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3867
3868                 seq_printf(seq, "RX Queue %d:\n", queue);
3869
3870                 if (priv->extend_desc) {
3871                         seq_printf(seq, "Extended descriptor ring:\n");
3872                         sysfs_display_ring((void *)rx_q->dma_erx,
3873                                            DMA_RX_SIZE, 1, seq);
3874                 } else {
3875                         seq_printf(seq, "Descriptor ring:\n");
3876                         sysfs_display_ring((void *)rx_q->dma_rx,
3877                                            DMA_RX_SIZE, 0, seq);
3878                 }
3879         }
3880
3881         for (queue = 0; queue < tx_count; queue++) {
3882                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3883
3884                 seq_printf(seq, "TX Queue %d:\n", queue);
3885
3886                 if (priv->extend_desc) {
3887                         seq_printf(seq, "Extended descriptor ring:\n");
3888                         sysfs_display_ring((void *)tx_q->dma_etx,
3889                                            DMA_TX_SIZE, 1, seq);
3890                 } else {
3891                         seq_printf(seq, "Descriptor ring:\n");
3892                         sysfs_display_ring((void *)tx_q->dma_tx,
3893                                            DMA_TX_SIZE, 0, seq);
3894                 }
3895         }
3896
3897         return 0;
3898 }
3899
3900 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3901 {
3902         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3903 }
3904
3905 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3906
3907 static const struct file_operations stmmac_rings_status_fops = {
3908         .owner = THIS_MODULE,
3909         .open = stmmac_sysfs_ring_open,
3910         .read = seq_read,
3911         .llseek = seq_lseek,
3912         .release = single_release,
3913 };
3914
3915 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3916 {
3917         struct net_device *dev = seq->private;
3918         struct stmmac_priv *priv = netdev_priv(dev);
3919
3920         if (!priv->hw_cap_support) {
3921                 seq_printf(seq, "DMA HW features not supported\n");
3922                 return 0;
3923         }
3924
3925         seq_printf(seq, "==============================\n");
3926         seq_printf(seq, "\tDMA HW features\n");
3927         seq_printf(seq, "==============================\n");
3928
3929         seq_printf(seq, "\t10/100 Mbps: %s\n",
3930                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3931         seq_printf(seq, "\t1000 Mbps: %s\n",
3932                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3933         seq_printf(seq, "\tHalf duplex: %s\n",
3934                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3935         seq_printf(seq, "\tHash Filter: %s\n",
3936                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3937         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3938                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3939         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3940                    (priv->dma_cap.pcs) ? "Y" : "N");
3941         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3942                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3943         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3944                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3945         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3946                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3947         seq_printf(seq, "\tRMON module: %s\n",
3948                    (priv->dma_cap.rmon) ? "Y" : "N");
3949         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3950                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3951         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3952                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3953         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3954                    (priv->dma_cap.eee) ? "Y" : "N");
3955         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3956         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3957                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3958         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3959                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3960                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3961         } else {
3962                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3963                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3964                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3965                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3966         }
3967         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3968                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3969         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3970                    priv->dma_cap.number_rx_channel);
3971         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3972                    priv->dma_cap.number_tx_channel);
3973         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3974                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3975
3976         return 0;
3977 }
3978
3979 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3980 {
3981         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3982 }
3983
3984 static const struct file_operations stmmac_dma_cap_fops = {
3985         .owner = THIS_MODULE,
3986         .open = stmmac_sysfs_dma_cap_open,
3987         .read = seq_read,
3988         .llseek = seq_lseek,
3989         .release = single_release,
3990 };
3991
3992 static int stmmac_init_fs(struct net_device *dev)
3993 {
3994         struct stmmac_priv *priv = netdev_priv(dev);
3995
3996         /* Create per netdev entries */
3997         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3998
3999         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4000                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4001
4002                 return -ENOMEM;
4003         }
4004
4005         /* Entry to report DMA RX/TX rings */
4006         priv->dbgfs_rings_status =
4007                 debugfs_create_file("descriptors_status", S_IRUGO,
4008                                     priv->dbgfs_dir, dev,
4009                                     &stmmac_rings_status_fops);
4010
4011         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4012                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4013                 debugfs_remove_recursive(priv->dbgfs_dir);
4014
4015                 return -ENOMEM;
4016         }
4017
4018         /* Entry to report the DMA HW features */
4019         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
4020                                             priv->dbgfs_dir,
4021                                             dev, &stmmac_dma_cap_fops);
4022
4023         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4024                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4025                 debugfs_remove_recursive(priv->dbgfs_dir);
4026
4027                 return -ENOMEM;
4028         }
4029
4030         return 0;
4031 }
4032
4033 static void stmmac_exit_fs(struct net_device *dev)
4034 {
4035         struct stmmac_priv *priv = netdev_priv(dev);
4036
4037         debugfs_remove_recursive(priv->dbgfs_dir);
4038 }
4039 #endif /* CONFIG_DEBUG_FS */
4040
4041 static const struct net_device_ops stmmac_netdev_ops = {
4042         .ndo_open = stmmac_open,
4043         .ndo_start_xmit = stmmac_xmit,
4044         .ndo_stop = stmmac_release,
4045         .ndo_change_mtu = stmmac_change_mtu,
4046         .ndo_fix_features = stmmac_fix_features,
4047         .ndo_set_features = stmmac_set_features,
4048         .ndo_set_rx_mode = stmmac_set_rx_mode,
4049         .ndo_tx_timeout = stmmac_tx_timeout,
4050         .ndo_do_ioctl = stmmac_ioctl,
4051 #ifdef CONFIG_NET_POLL_CONTROLLER
4052         .ndo_poll_controller = stmmac_poll_controller,
4053 #endif
4054         .ndo_set_mac_address = stmmac_set_mac_address,
4055 };
4056
4057 /**
4058  *  stmmac_hw_init - Init the MAC device
4059  *  @priv: driver private structure
4060  *  Description: this function is to configure the MAC device according to
4061  *  some platform parameters or the HW capability register. It prepares the
4062  *  driver to use either ring or chain modes and to setup either enhanced or
4063  *  normal descriptors.
4064  */
4065 static int stmmac_hw_init(struct stmmac_priv *priv)
4066 {
4067         struct mac_device_info *mac;
4068
4069         /* Identify the MAC HW device */
4070         if (priv->plat->setup) {
4071                 mac = priv->plat->setup(priv);
4072         } else if (priv->plat->has_gmac) {
4073                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4074                 mac = dwmac1000_setup(priv->ioaddr,
4075                                       priv->plat->multicast_filter_bins,
4076                                       priv->plat->unicast_filter_entries,
4077                                       &priv->synopsys_id);
4078         } else if (priv->plat->has_gmac4) {
4079                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4080                 mac = dwmac4_setup(priv->ioaddr,
4081                                    priv->plat->multicast_filter_bins,
4082                                    priv->plat->unicast_filter_entries,
4083                                    &priv->synopsys_id);
4084         } else {
4085                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4086         }
4087         if (!mac)
4088                 return -ENOMEM;
4089
4090         priv->hw = mac;
4091
4092         /* dwmac-sun8i only works in chain mode */
4093         if (priv->plat->has_sun8i)
4094                 chain_mode = 1;
4095
4096         /* To use the chained or ring mode */
4097         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4098                 priv->hw->mode = &dwmac4_ring_mode_ops;
4099         } else {
4100                 if (chain_mode) {
4101                         priv->hw->mode = &chain_mode_ops;
4102                         dev_info(priv->device, "Chain mode enabled\n");
4103                         priv->mode = STMMAC_CHAIN_MODE;
4104                 } else {
4105                         priv->hw->mode = &ring_mode_ops;
4106                         dev_info(priv->device, "Ring mode enabled\n");
4107                         priv->mode = STMMAC_RING_MODE;
4108                 }
4109         }
4110
4111         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4112         priv->hw_cap_support = stmmac_get_hw_features(priv);
4113         if (priv->hw_cap_support) {
4114                 dev_info(priv->device, "DMA HW capability register supported\n");
4115
4116                 /* Override some gmac/dma configuration fields (e.g.
4117                  * enh_desc, tx_coe) that are passed through the
4118                  * platform data with the values from the HW capability
4119                  * register (if supported).
4120                  */
4121                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4122                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4123                 priv->hw->pmt = priv->plat->pmt;
4124
4125                 /* TXCOE doesn't work in thresh DMA mode */
4126                 if (priv->plat->force_thresh_dma_mode)
4127                         priv->plat->tx_coe = 0;
4128                 else
4129                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4130
4131                 /* In case of GMAC4, rx_coe is taken from the HW cap register. */
4132                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4133
4134                 if (priv->dma_cap.rx_coe_type2)
4135                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4136                 else if (priv->dma_cap.rx_coe_type1)
4137                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4138
4139         } else {
4140                 dev_info(priv->device, "No HW DMA feature register supported\n");
4141         }
4142
4143         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4144         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4145                 priv->hw->desc = &dwmac4_desc_ops;
4146         else
4147                 stmmac_selec_desc_mode(priv);
4148
4149         if (priv->plat->rx_coe) {
4150                 priv->hw->rx_csum = priv->plat->rx_coe;
4151                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4152                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4153                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4154         }
4155         if (priv->plat->tx_coe)
4156                 dev_info(priv->device, "TX Checksum insertion supported\n");
4157
4158         if (priv->plat->pmt) {
4159                 dev_info(priv->device, "Wake-Up On Lan supported\n");
4160                 device_set_wakeup_capable(priv->device, 1);
4161         }
4162
4163         if (priv->dma_cap.tsoen)
4164                 dev_info(priv->device, "TSO supported\n");
4165
4166         return 0;
4167 }
4168
4169 /**
4170  * stmmac_dvr_probe
4171  * @device: device pointer
4172  * @plat_dat: platform data pointer
4173  * @res: stmmac resource pointer
4174  * Description: this is the main probe function; it calls alloc_etherdev
4175  * and allocates and initializes the private structure.
4176  * Return:
4177  * 0 on success, otherwise a negative errno.
4178  */
4179 int stmmac_dvr_probe(struct device *device,
4180                      struct plat_stmmacenet_data *plat_dat,
4181                      struct stmmac_resources *res)
4182 {
4183         struct net_device *ndev = NULL;
4184         struct stmmac_priv *priv;
4185         int ret = 0;
4186         u32 queue;
4187
4188         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4189                                   MTL_MAX_TX_QUEUES,
4190                                   MTL_MAX_RX_QUEUES);
4191         if (!ndev)
4192                 return -ENOMEM;
4193
4194         SET_NETDEV_DEV(ndev, device);
4195
4196         priv = netdev_priv(ndev);
4197         priv->device = device;
4198         priv->dev = ndev;
4199
4200         stmmac_set_ethtool_ops(ndev);
4201         priv->pause = pause;
4202         priv->plat = plat_dat;
4203         priv->ioaddr = res->addr;
4204         priv->dev->base_addr = (unsigned long)res->addr;
4205
4206         priv->dev->irq = res->irq;
4207         priv->wol_irq = res->wol_irq;
4208         priv->lpi_irq = res->lpi_irq;
4209
4210         if (res->mac)
4211                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4212
4213         dev_set_drvdata(device, priv->dev);
4214
4215         /* Verify driver arguments */
4216         stmmac_verify_args();
4217
4218         /* Override with kernel parameters if supplied XXX CRS XXX
4219          * this needs to have multiple instances
4220          */
4221         if ((phyaddr >= 0) && (phyaddr <= 31))
4222                 priv->plat->phy_addr = phyaddr;
4223
4224         if (priv->plat->stmmac_rst) {
4225                 ret = reset_control_assert(priv->plat->stmmac_rst);
4226                 reset_control_deassert(priv->plat->stmmac_rst);
4227                 /* Some reset controllers have only a reset callback instead of
4228                  * an assert + deassert callback pair.
4229                  */
4230                 if (ret == -ENOTSUPP)
4231                         reset_control_reset(priv->plat->stmmac_rst);
4232         }
4233
4234         /* Init MAC and get the capabilities */
4235         ret = stmmac_hw_init(priv);
4236         if (ret)
4237                 goto error_hw_init;
4238
4239         stmmac_check_ether_addr(priv);
4240
4241         /* Configure real RX and TX queues */
4242         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4243         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4244
4245         ndev->netdev_ops = &stmmac_netdev_ops;
4246
4247         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4248                             NETIF_F_RXCSUM;
4249
4250         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4251                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4252                 priv->tso = true;
4253                 dev_info(priv->device, "TSO feature enabled\n");
4254         }
4255         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4256         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4257 #ifdef STMMAC_VLAN_TAG_USED
4258         /* Both mac100 and gmac support receive VLAN tag detection */
4259         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4260 #endif
4261         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4262
4263         /* MTU range: 46 - hw-specific max */
4264         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4265         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4266                 ndev->max_mtu = JUMBO_LEN;
4267         else
4268                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4269         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4270          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4271          */
4272         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4273             (priv->plat->maxmtu >= ndev->min_mtu))
4274                 ndev->max_mtu = priv->plat->maxmtu;
4275         else if (priv->plat->maxmtu < ndev->min_mtu)
4276                 dev_warn(priv->device,
4277                          "%s: warning: maxmtu having invalid value (%d)\n",
4278                          __func__, priv->plat->maxmtu);
4279
4280         if (flow_ctrl)
4281                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4282
4283         /* Rx Watchdog is available in cores newer than 3.40.
4284          * In some cases, for example on buggy HW, this feature
4285          * has to be disabled; this can be done by passing the
4286          * riwt_off field from the platform.
4287          */
4288         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4289                 priv->use_riwt = 1;
4290                 dev_info(priv->device,
4291                          "Enable RX Mitigation via HW Watchdog Timer\n");
4292         }
4293
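        /* Register one NAPI instance per RX queue; the poll weight is
         * scaled by the number of RX queues in use.
         */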
4294         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4295                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4296
4297                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4298                                (8 * priv->plat->rx_queues_to_use));
4299         }
4300
4301         mutex_init(&priv->lock);
4302
4303         /* If a specific clk_csr value is passed from the platform,
4304          * the CSR Clock Range selection cannot be changed at run-time
4305          * and is fixed. Otherwise, the driver will try to set the MDC
4306          * clock dynamically according to the actual CSR clock
4307          * input.
4308          */
4309         if (!priv->plat->clk_csr)
4310                 stmmac_clk_csr_set(priv);
4311         else
4312                 priv->clk_csr = priv->plat->clk_csr;
4313
4314         stmmac_check_pcs_mode(priv);
4315
4316         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4317             priv->hw->pcs != STMMAC_PCS_TBI &&
4318             priv->hw->pcs != STMMAC_PCS_RTBI) {
4319                 /* MDIO bus Registration */
4320                 ret = stmmac_mdio_register(ndev);
4321                 if (ret < 0) {
4322                         dev_err(priv->device,
4323                                 "%s: MDIO bus (id: %d) registration failed",
4324                                 __func__, priv->plat->bus_id);
4325                         goto error_mdio_register;
4326                 }
4327         }
4328
4329         ret = register_netdev(ndev);
4330         if (ret) {
4331                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4332                         __func__, ret);
4333                 goto error_netdev_register;
4334         }
4335
4336 #ifdef CONFIG_DEBUG_FS
4337         ret = stmmac_init_fs(ndev);
4338         if (ret < 0)
4339                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4340                             __func__);
4341 #endif
4342
4343         return ret;
4344
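/* Error unwind: release resources in the reverse order they were acquired */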
4345 error_netdev_register:
4346         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4347             priv->hw->pcs != STMMAC_PCS_TBI &&
4348             priv->hw->pcs != STMMAC_PCS_RTBI)
4349                 stmmac_mdio_unregister(ndev);
4350 error_mdio_register:
4351         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4352                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4353
4354                 netif_napi_del(&rx_q->napi);
4355         }
4356 error_hw_init:
4357         free_netdev(ndev);
4358
4359         return ret;
4360 }
4361 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4362
4363 /**
4364  * stmmac_dvr_remove
4365  * @dev: device pointer
4366  * Description: this function resets the TX/RX processes, disables the MAC
4367  * RX/TX, changes the link status, and releases the DMA descriptor rings.
4368  */
4369 int stmmac_dvr_remove(struct device *dev)
4370 {
4371         struct net_device *ndev = dev_get_drvdata(dev);
4372         struct stmmac_priv *priv = netdev_priv(ndev);
4373
4374         netdev_info(priv->dev, "%s: removing driver", __func__);
4375
4376 #ifdef CONFIG_DEBUG_FS
4377         stmmac_exit_fs(ndev);
4378 #endif
4379         stmmac_stop_all_dma(priv);
4380
4381         priv->hw->mac->set_mac(priv->ioaddr, false);
4382         netif_carrier_off(ndev);
4383         unregister_netdev(ndev);
4384         if (priv->plat->stmmac_rst)
4385                 reset_control_assert(priv->plat->stmmac_rst);
4386         clk_disable_unprepare(priv->plat->pclk);
4387         clk_disable_unprepare(priv->plat->stmmac_clk);
4388         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4389             priv->hw->pcs != STMMAC_PCS_TBI &&
4390             priv->hw->pcs != STMMAC_PCS_RTBI)
4391                 stmmac_mdio_unregister(ndev);
4392         mutex_destroy(&priv->lock);
4393         free_netdev(ndev);
4394
4395         return 0;
4396 }
4397 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4398
4399 /**
4400  * stmmac_suspend - suspend callback
4401  * @dev: device pointer
4402  * Description: this function suspends the device; it is called by the
4403  * platform driver to stop the network queues, program the PMT register
4404  * (for WoL), and clean up and release the driver resources.
4405  */
4406 int stmmac_suspend(struct device *dev)
4407 {
4408         struct net_device *ndev = dev_get_drvdata(dev);
4409         struct stmmac_priv *priv = netdev_priv(ndev);
4410
4411         if (!ndev || !netif_running(ndev))
4412                 return 0;
4413
4414         if (ndev->phydev)
4415                 phy_stop(ndev->phydev);
4416
4417         mutex_lock(&priv->lock);
4418
4419         netif_device_detach(ndev);
4420         stmmac_stop_all_queues(priv);
4421
4422         stmmac_disable_all_queues(priv);
4423
4424         if (priv->eee_enabled) {
4425                 priv->tx_path_in_lpi_mode = false;
4426                 del_timer_sync(&priv->eee_ctrl_timer);
4427         }
4428
4429         /* Stop TX/RX DMA */
4430         stmmac_stop_all_dma(priv);
4431
4432         /* Enable Power down mode by programming the PMT regs */
4433         if (device_may_wakeup(priv->device)) {
4434                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4435                 priv->irq_wake = 1;
4436         } else {
4437                 priv->hw->mac->set_mac(priv->ioaddr, false);
4438                 pinctrl_pm_select_sleep_state(priv->device);
4439                 /* Disable clocks in case PWM is off */
4440                 if (priv->plat->clk_ptp_ref)
4441                         clk_disable_unprepare(priv->plat->clk_ptp_ref);
4442                 clk_disable_unprepare(priv->plat->pclk);
4443                 clk_disable_unprepare(priv->plat->stmmac_clk);
4444         }
4445         mutex_unlock(&priv->lock);
4446
4447         priv->oldlink = false;
4448         priv->speed = SPEED_UNKNOWN;
4449         priv->oldduplex = DUPLEX_UNKNOWN;
4450         return 0;
4451 }
4452 EXPORT_SYMBOL_GPL(stmmac_suspend);
4453
4454 /**
4455  * stmmac_reset_queues_param - reset queue parameters
4456  * @priv: driver private structure
4457  */
4458 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4459 {
4460         u32 rx_cnt = priv->plat->rx_queues_to_use;
4461         u32 tx_cnt = priv->plat->tx_queues_to_use;
4462         u32 queue;
4463
4464         for (queue = 0; queue < rx_cnt; queue++) {
4465                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4466
4467                 rx_q->cur_rx = 0;
4468                 rx_q->dirty_rx = 0;
4469         }
4470
4471         for (queue = 0; queue < tx_cnt; queue++) {
4472                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4473
4474                 tx_q->cur_tx = 0;
4475                 tx_q->dirty_tx = 0;
4476         }
4477 }
4478
4479 /**
4480  * stmmac_resume - resume callback
4481  * @dev: device pointer
4482  * Description: on resume this function is invoked to set up the DMA and
4483  * CORE in a usable state.
4484  */
4485 int stmmac_resume(struct device *dev)
4486 {
4487         struct net_device *ndev = dev_get_drvdata(dev);
4488         struct stmmac_priv *priv = netdev_priv(ndev);
4489
4490         if (!netif_running(ndev))
4491                 return 0;
4492
4493         /* The Power Down bit in the PM register is cleared
4494          * automatically as soon as a magic packet or a Wake-up frame
4495          * is received. Even so, it's better to clear this bit manually
4496          * because it can cause problems when resuming from another
4497          * device (e.g. serial console).
4498          */
4499         if (device_may_wakeup(priv->device)) {
4500                 mutex_lock(&priv->lock);
4501                 priv->hw->mac->pmt(priv->hw, 0);
4502                 mutex_unlock(&priv->lock);
4503                 priv->irq_wake = 0;
4504         } else {
4505                 pinctrl_pm_select_default_state(priv->device);
4506                 /* enable the clocks previously disabled */
4507                 clk_prepare_enable(priv->plat->stmmac_clk);
4508                 clk_prepare_enable(priv->plat->pclk);
4509                 if (priv->plat->clk_ptp_ref)
4510                         clk_prepare_enable(priv->plat->clk_ptp_ref);
4511                 /* reset the phy so that it's ready */
4512                 if (priv->mii)
4513                         stmmac_mdio_reset(priv->mii);
4514         }
4515
4516         netif_device_attach(ndev);
4517
4518         mutex_lock(&priv->lock);
4519
4520         stmmac_reset_queues_param(priv);
4521
4522         /* Reset the private MSS value to force MSS context settings at
4523          * the next TSO xmit (only used for GMAC4).
4524          */
4525         priv->mss = 0;
4526
4527         stmmac_free_tx_skbufs(priv);
4528         stmmac_clear_descriptors(priv);
4529
4530         stmmac_hw_setup(ndev, false);
4531         stmmac_init_tx_coalesce(priv);
4532         stmmac_set_rx_mode(ndev);
4533
4534         stmmac_enable_all_queues(priv);
4535
4536         stmmac_start_all_queues(priv);
4537
4538         mutex_unlock(&priv->lock);
4539
4540         if (ndev->phydev)
4541                 phy_start(ndev->phydev);
4542
4543         return 0;
4544 }
4545 EXPORT_SYMBOL_GPL(stmmac_resume);
4546
4547 #ifndef MODULE
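/* Built-in only: parse the "stmmaceth=" kernel command line option. It
 * accepts a comma-separated list of key:value pairs that mirror the module
 * parameters, e.g. stmmaceth=watchdog:5000,debug:16. Recognised keys are
 * debug, phyaddr, buf_sz, tc, watchdog, flow_ctrl, pause, eee_timer and
 * chain_mode.
 */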
4548 static int __init stmmac_cmdline_opt(char *str)
4549 {
4550         char *opt;
4551
4552         if (!str || !*str)
4553                 return -EINVAL;
4554         while ((opt = strsep(&str, ",")) != NULL) {
4555                 if (!strncmp(opt, "debug:", 6)) {
4556                         if (kstrtoint(opt + 6, 0, &debug))
4557                                 goto err;
4558                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4559                         if (kstrtoint(opt + 8, 0, &phyaddr))
4560                                 goto err;
4561                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4562                         if (kstrtoint(opt + 7, 0, &buf_sz))
4563                                 goto err;
4564                 } else if (!strncmp(opt, "tc:", 3)) {
4565                         if (kstrtoint(opt + 3, 0, &tc))
4566                                 goto err;
4567                 } else if (!strncmp(opt, "watchdog:", 9)) {
4568                         if (kstrtoint(opt + 9, 0, &watchdog))
4569                                 goto err;
4570                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4571                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4572                                 goto err;
4573                 } else if (!strncmp(opt, "pause:", 6)) {
4574                         if (kstrtoint(opt + 6, 0, &pause))
4575                                 goto err;
4576                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4577                         if (kstrtoint(opt + 10, 0, &eee_timer))
4578                                 goto err;
4579                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4580                         if (kstrtoint(opt + 11, 0, &chain_mode))
4581                                 goto err;
4582                 }
4583         }
4584         return 0;
4585
4586 err:
4587         pr_err("%s: ERROR broken module parameter conversion", __func__);
4588         return -EINVAL;
4589 }
4590
4591 __setup("stmmaceth=", stmmac_cmdline_opt);
4592 #endif /* MODULE */
4593
4594 static int __init stmmac_init(void)
4595 {
4596 #ifdef CONFIG_DEBUG_FS
4597         /* Create debugfs main directory if it doesn't exist yet */
4598         if (!stmmac_fs_dir) {
4599                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4600
4601                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4602                         pr_err("ERROR %s, debugfs create directory failed\n",
4603                                STMMAC_RESOURCE_NAME);
4604
4605                         return -ENOMEM;
4606                 }
4607         }
4608 #endif
4609
4610         return 0;
4611 }
4612
4613 static void __exit stmmac_exit(void)
4614 {
4615 #ifdef CONFIG_DEBUG_FS
4616         debugfs_remove_recursive(stmmac_fs_dir);
4617 #endif
4618 }
4619
4620 module_init(stmmac_init)
4621 module_exit(stmmac_exit)
4622
4623 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4624 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4625 MODULE_LICENSE("GPL");