GNU Linux-libre 4.19.245-gnu1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56
57 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
58 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
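/* For example, on a system where SMP_CACHE_BYTES is 64, STMMAC_ALIGN(1500)
 * first rounds up to a cache line (1536) and then to 16 bytes, yielding 1536.
 */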
59
60 /* Module parameters */
61 #define TX_TIMEO        5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73
74 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
76
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89
90 #define DEFAULT_BUFSIZE 1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
95 #define STMMAC_RX_COPYBREAK     256
96
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
99                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
101 #define STMMAC_DEFAULT_LPI_TIMER        1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
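/* For example, with the default eee_timer of 1000 ms, STMMAC_LPI_T(eee_timer)
 * evaluates to a point in time one second after the current jiffies value.
 */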
106
107 /* By default the driver will use the ring mode to manage tx and rx descriptors,
108  * but allow the user to force the use of chain mode instead of ring mode.
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
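/* Usage sketch (assuming the driver is built as the "stmmac" module): the
 * parameters above can be set at load time, e.g.
 *   modprobe stmmac chain_mode=1 eee_timer=2000
 * or, when built in, on the kernel command line as stmmac.chain_mode=1.
 */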
113
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122
123 /**
124  * stmmac_verify_args - verify the driver parameters.
125  * Description: it checks the driver parameters and sets a default in case of
126  * errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130         if (unlikely(watchdog < 0))
131                 watchdog = TX_TIMEO;
132         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133                 buf_sz = DEFAULT_BUFSIZE;
134         if (unlikely(flow_ctrl > 1))
135                 flow_ctrl = FLOW_AUTO;
136         else if (likely(flow_ctrl < 0))
137                 flow_ctrl = FLOW_OFF;
138         if (unlikely((pause < 0) || (pause > 0xffff)))
139                 pause = PAUSE_TIME;
140         if (eee_timer < 0)
141                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153         u32 queue;
154
155         for (queue = 0; queue < maxq; queue++) {
156                 struct stmmac_channel *ch = &priv->channel[queue];
157
158                 napi_disable(&ch->napi);
159         }
160 }
161
162 /**
163  * stmmac_enable_all_queues - Enable all queues
164  * @priv: driver private structure
165  */
166 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
167 {
168         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
169         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
170         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
171         u32 queue;
172
173         for (queue = 0; queue < maxq; queue++) {
174                 struct stmmac_channel *ch = &priv->channel[queue];
175
176                 napi_enable(&ch->napi);
177         }
178 }
179
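/**
 * stmmac_service_event_schedule - schedule the service task
 * @priv: driver private structure
 * Description: queues the driver's service work on the private workqueue,
 * unless the interface is going down or the work is already scheduled.
 */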
180 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
181 {
182         if (!test_bit(STMMAC_DOWN, &priv->state) &&
183             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
184                 queue_work(priv->wq, &priv->service_task);
185 }
186
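/**
 * stmmac_global_err - handle a fatal global error
 * @priv: driver private structure
 * Description: takes the carrier down, flags that a reset is required and
 * lets the service task perform the actual recovery.
 */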
187 static void stmmac_global_err(struct stmmac_priv *priv)
188 {
189         netif_carrier_off(priv->dev);
190         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
191         stmmac_service_event_schedule(priv);
192 }
193
194 /**
195  * stmmac_clk_csr_set - dynamically set the MDC clock
196  * @priv: driver private structure
197  * Description: this is to dynamically set the MDC clock according to the csr
198  * clock input.
199  * Note:
200  *      If a specific clk_csr value is passed from the platform
201  *      this means that the CSR Clock Range selection cannot be
202  *      changed at run-time and it is fixed (as reported in the driver
203  *      documentation). Otherwise the driver will try to set the MDC
204  *      clock dynamically according to the actual clock input.
205  */
206 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
207 {
208         u32 clk_rate;
209
210         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
211
212         /* The platform-provided default clk_csr is assumed valid for all
213          * cases except the ones mentioned below.
214          * For values higher than the IEEE 802.3 specified frequency we
215          * cannot estimate the proper divider because the frequency of
216          * clk_csr_i is not known, so the default divider is left
217          * unchanged.
218          */
219         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
220                 if (clk_rate < CSR_F_35M)
221                         priv->clk_csr = STMMAC_CSR_20_35M;
222                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
223                         priv->clk_csr = STMMAC_CSR_35_60M;
224                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
225                         priv->clk_csr = STMMAC_CSR_60_100M;
226                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
227                         priv->clk_csr = STMMAC_CSR_100_150M;
228                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
229                         priv->clk_csr = STMMAC_CSR_150_250M;
230                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
231                         priv->clk_csr = STMMAC_CSR_250_300M;
232         }
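        /* Worked example (illustrative): a 50 MHz CSR clock selects the
         * 35-60 MHz range above; on this IP that range corresponds to an MDC
         * divisor of 26, i.e. an MDC frequency of roughly 1.9 MHz, below the
         * 2.5 MHz limit of IEEE 802.3.
         */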
233
234         if (priv->plat->has_sun8i) {
235                 if (clk_rate > 160000000)
236                         priv->clk_csr = 0x03;
237                 else if (clk_rate > 80000000)
238                         priv->clk_csr = 0x02;
239                 else if (clk_rate > 40000000)
240                         priv->clk_csr = 0x01;
241                 else
242                         priv->clk_csr = 0;
243         }
244
245         if (priv->plat->has_xgmac) {
246                 if (clk_rate > 400000000)
247                         priv->clk_csr = 0x5;
248                 else if (clk_rate > 350000000)
249                         priv->clk_csr = 0x4;
250                 else if (clk_rate > 300000000)
251                         priv->clk_csr = 0x3;
252                 else if (clk_rate > 250000000)
253                         priv->clk_csr = 0x2;
254                 else if (clk_rate > 150000000)
255                         priv->clk_csr = 0x1;
256                 else
257                         priv->clk_csr = 0x0;
258         }
259 }
260
261 static void print_pkt(unsigned char *buf, int len)
262 {
263         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
264         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
265 }
266
267 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
268 {
269         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
270         u32 avail;
271
272         if (tx_q->dirty_tx > tx_q->cur_tx)
273                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
274         else
275                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
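        /* Illustrative example: if DMA_TX_SIZE were 512 with cur_tx = 10 and
         * dirty_tx = 5, then avail = 512 - 10 + 5 - 1 = 506; one entry is
         * always kept unused to distinguish a full ring from an empty one.
         */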
276
277         return avail;
278 }
279
280 /**
281  * stmmac_rx_dirty - Get RX queue dirty
282  * @priv: driver private structure
283  * @queue: RX queue index
284  */
285 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
286 {
287         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
288         u32 dirty;
289
290         if (rx_q->dirty_rx <= rx_q->cur_rx)
291                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
292         else
293                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
294
295         return dirty;
296 }
297
298 /**
299  * stmmac_hw_fix_mac_speed - callback for speed selection
300  * @priv: driver private structure
301  * Description: on some platforms (e.g. ST), some HW system configuration
302  * registers have to be set according to the link speed negotiated.
303  */
304 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
305 {
306         struct net_device *ndev = priv->dev;
307         struct phy_device *phydev = ndev->phydev;
308
309         if (likely(priv->plat->fix_mac_speed))
310                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
311 }
312
313 /**
314  * stmmac_enable_eee_mode - check and enter in LPI mode
315  * @priv: driver private structure
316  * Description: this function checks whether all TX queues are idle and,
317  * if so, puts the MAC transmitter into LPI mode for EEE.
318  */
319 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
320 {
321         u32 tx_cnt = priv->plat->tx_queues_to_use;
322         u32 queue;
323
324         /* check if all TX queues have the work finished */
325         for (queue = 0; queue < tx_cnt; queue++) {
326                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
327
328                 if (tx_q->dirty_tx != tx_q->cur_tx)
329                         return; /* still unfinished work */
330         }
331
332         /* Check and enter in LPI mode */
333         if (!priv->tx_path_in_lpi_mode)
334                 stmmac_set_eee_mode(priv, priv->hw,
335                                 priv->plat->en_tx_lpi_clockgating);
336 }
337
338 /**
339  * stmmac_disable_eee_mode - disable and exit from LPI mode
340  * @priv: driver private structure
341  * Description: this function disables EEE and exits LPI mode. It is
342  * called from the xmit path when the TX path is in LPI state.
343  */
344 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
345 {
346         stmmac_reset_eee_mode(priv, priv->hw);
347         del_timer_sync(&priv->eee_ctrl_timer);
348         priv->tx_path_in_lpi_mode = false;
349 }
350
351 /**
352  * stmmac_eee_ctrl_timer - EEE TX SW timer.
353  * @t: the timer_list handle embedded in the driver private structure
354  * Description:
355  *  if there is no data transfer and we are not already in LPI state,
356  *  then the MAC transmitter can be moved to LPI state.
357  */
358 static void stmmac_eee_ctrl_timer(struct timer_list *t)
359 {
360         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
361
362         stmmac_enable_eee_mode(priv);
363         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
364 }
365
366 /**
367  * stmmac_eee_init - init EEE
368  * @priv: driver private structure
369  * Description:
370  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
371  *  can also manage EEE, this function enables the LPI state and starts the
372  *  related timer.
373  */
374 bool stmmac_eee_init(struct stmmac_priv *priv)
375 {
376         struct net_device *ndev = priv->dev;
377         int interface = priv->plat->interface;
378         bool ret = false;
379
380         if ((interface != PHY_INTERFACE_MODE_MII) &&
381             (interface != PHY_INTERFACE_MODE_GMII) &&
382             !phy_interface_mode_is_rgmii(interface))
383                 goto out;
384
385         /* When using PCS we cannot deal with the PHY registers at this stage,
386          * so we do not support extra features like EEE.
387          */
388         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
389             (priv->hw->pcs == STMMAC_PCS_TBI) ||
390             (priv->hw->pcs == STMMAC_PCS_RTBI))
391                 goto out;
392
393         /* MAC core supports the EEE feature. */
394         if (priv->dma_cap.eee) {
395                 int tx_lpi_timer = priv->tx_lpi_timer;
396
397                 /* Check if the PHY supports EEE */
398                 if (phy_init_eee(ndev->phydev, 1)) {
399                         /* Handle the case where EEE can no longer be supported
400                          * at run-time (for example because the link partner
401                          * capabilities have changed).
402                          * In that case the driver disables its own timers.
403                          */
404                         mutex_lock(&priv->lock);
405                         if (priv->eee_active) {
406                                 netdev_dbg(priv->dev, "disable EEE\n");
407                                 del_timer_sync(&priv->eee_ctrl_timer);
408                                 stmmac_set_eee_timer(priv, priv->hw, 0,
409                                                 tx_lpi_timer);
410                         }
411                         priv->eee_active = 0;
412                         mutex_unlock(&priv->lock);
413                         goto out;
414                 }
415                 /* Activate the EEE and start timers */
416                 mutex_lock(&priv->lock);
417                 if (!priv->eee_active) {
418                         priv->eee_active = 1;
419                         timer_setup(&priv->eee_ctrl_timer,
420                                     stmmac_eee_ctrl_timer, 0);
421                         mod_timer(&priv->eee_ctrl_timer,
422                                   STMMAC_LPI_T(eee_timer));
423
424                         stmmac_set_eee_timer(priv, priv->hw,
425                                         STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
426                 }
427                 /* Set HW EEE according to the speed */
428                 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
429
430                 ret = true;
431                 mutex_unlock(&priv->lock);
432
433                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
434         }
435 out:
436         return ret;
437 }
438
439 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
440  * @priv: driver private structure
441  * @p : descriptor pointer
442  * @skb : the socket buffer
443  * Description :
444  * This function reads the timestamp from the descriptor, performs some
445  * sanity checks, and passes it to the stack.
446  */
447 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
448                                    struct dma_desc *p, struct sk_buff *skb)
449 {
450         struct skb_shared_hwtstamps shhwtstamp;
451         u64 ns = 0;
452
453         if (!priv->hwts_tx_en)
454                 return;
455
456         /* exit if skb doesn't support hw tstamp */
457         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
458                 return;
459
460         /* check tx tstamp status */
461         if (stmmac_get_tx_timestamp_status(priv, p)) {
462                 /* get the valid tstamp */
463                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
464
465                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
466                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
467
468                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
469                 /* pass tstamp to stack */
470                 skb_tstamp_tx(skb, &shhwtstamp);
471         }
472
473         return;
474 }
475
476 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
477  * @priv: driver private structure
478  * @p : descriptor pointer
479  * @np : next descriptor pointer
480  * @skb : the socket buffer
481  * Description :
482  * This function reads the received packet's timestamp from the descriptor
483  * and passes it to the stack. It also performs some sanity checks.
484  */
485 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
486                                    struct dma_desc *np, struct sk_buff *skb)
487 {
488         struct skb_shared_hwtstamps *shhwtstamp = NULL;
489         struct dma_desc *desc = p;
490         u64 ns = 0;
491
492         if (!priv->hwts_rx_en)
493                 return;
494         /* For GMAC4, the valid timestamp is from CTX next desc. */
495         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
496                 desc = np;
497
498         /* Check if timestamp is available */
499         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
500                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
501                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
502                 shhwtstamp = skb_hwtstamps(skb);
503                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
504                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
505         } else  {
506                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
507         }
508 }
509
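/* User-space usage sketch (illustrative, not part of the driver): hardware
 * timestamping is requested through the standard SIOCSHWTSTAMP ioctl, e.g.
 *
 *      struct hwtstamp_config cfg = { 0 };
 *      struct ifreq ifr = { 0 };
 *
 *      cfg.tx_type   = HWTSTAMP_TX_ON;
 *      cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The handler below validates the request and programs the timestamping
 * registers accordingly.
 */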
510 /**
511  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
512  *  @dev: device pointer.
513  *  @ifr: An IOCTL specific structure, that can contain a pointer to
514  *  a proprietary structure used to pass information to the driver.
515  *  Description:
516  *  This function configures the MAC to enable/disable time stamping of
517  *  both outgoing (TX) and incoming (RX) packets based on user input.
518  *  Return Value:
519  *  0 on success and an appropriate negative integer on failure.
520  */
521 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
522 {
523         struct stmmac_priv *priv = netdev_priv(dev);
524         struct hwtstamp_config config;
525         struct timespec64 now;
526         u64 temp = 0;
527         u32 ptp_v2 = 0;
528         u32 tstamp_all = 0;
529         u32 ptp_over_ipv4_udp = 0;
530         u32 ptp_over_ipv6_udp = 0;
531         u32 ptp_over_ethernet = 0;
532         u32 snap_type_sel = 0;
533         u32 ts_master_en = 0;
534         u32 ts_event_en = 0;
535         u32 sec_inc = 0;
536         u32 value = 0;
537         bool xmac;
538
539         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
540
541         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
542                 netdev_alert(priv->dev, "No support for HW time stamping\n");
543                 priv->hwts_tx_en = 0;
544                 priv->hwts_rx_en = 0;
545
546                 return -EOPNOTSUPP;
547         }
548
549         if (copy_from_user(&config, ifr->ifr_data,
550                            sizeof(struct hwtstamp_config)))
551                 return -EFAULT;
552
553         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
554                    __func__, config.flags, config.tx_type, config.rx_filter);
555
556         /* reserved for future extensions */
557         if (config.flags)
558                 return -EINVAL;
559
560         if (config.tx_type != HWTSTAMP_TX_OFF &&
561             config.tx_type != HWTSTAMP_TX_ON)
562                 return -ERANGE;
563
564         if (priv->adv_ts) {
565                 switch (config.rx_filter) {
566                 case HWTSTAMP_FILTER_NONE:
567                         /* time stamp no incoming packet at all */
568                         config.rx_filter = HWTSTAMP_FILTER_NONE;
569                         break;
570
571                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
572                         /* PTP v1, UDP, any kind of event packet */
573                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
574                         /* take time stamp for all event messages */
575                         if (xmac)
576                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
577                         else
578                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
579
580                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
581                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
582                         break;
583
584                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
585                         /* PTP v1, UDP, Sync packet */
586                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
587                         /* take time stamp for SYNC messages only */
588                         ts_event_en = PTP_TCR_TSEVNTENA;
589
590                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592                         break;
593
594                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
595                         /* PTP v1, UDP, Delay_req packet */
596                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
597                         /* take time stamp for Delay_Req messages only */
598                         ts_master_en = PTP_TCR_TSMSTRENA;
599                         ts_event_en = PTP_TCR_TSEVNTENA;
600
601                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
602                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
603                         break;
604
605                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
606                         /* PTP v2, UDP, any kind of event packet */
607                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
608                         ptp_v2 = PTP_TCR_TSVER2ENA;
609                         /* take time stamp for all event messages */
610                         if (xmac)
611                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
612                         else
613                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
614
615                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
616                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
617                         break;
618
619                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
620                         /* PTP v2, UDP, Sync packet */
621                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
622                         ptp_v2 = PTP_TCR_TSVER2ENA;
623                         /* take time stamp for SYNC messages only */
624                         ts_event_en = PTP_TCR_TSEVNTENA;
625
626                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
627                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
628                         break;
629
630                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
631                         /* PTP v2, UDP, Delay_req packet */
632                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
633                         ptp_v2 = PTP_TCR_TSVER2ENA;
634                         /* take time stamp for Delay_Req messages only */
635                         ts_master_en = PTP_TCR_TSMSTRENA;
636                         ts_event_en = PTP_TCR_TSEVNTENA;
637
638                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
639                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
640                         break;
641
642                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
643                         /* PTP v2/802.1AS, any layer, any kind of event packet */
644                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
645                         ptp_v2 = PTP_TCR_TSVER2ENA;
646                         /* take time stamp for all event messages */
647                         if (xmac)
648                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
649                         else
650                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
651
652                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
653                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
654                         ptp_over_ethernet = PTP_TCR_TSIPENA;
655                         break;
656
657                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
658                         /* PTP v2/802.1AS, any layer, Sync packet */
659                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
660                         ptp_v2 = PTP_TCR_TSVER2ENA;
661                         /* take time stamp for SYNC messages only */
662                         ts_event_en = PTP_TCR_TSEVNTENA;
663
664                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
665                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
666                         ptp_over_ethernet = PTP_TCR_TSIPENA;
667                         break;
668
669                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
670                         /* PTP v2/802.1AS, any layer, Delay_req packet */
671                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
672                         ptp_v2 = PTP_TCR_TSVER2ENA;
673                         /* take time stamp for Delay_Req messages only */
674                         ts_master_en = PTP_TCR_TSMSTRENA;
675                         ts_event_en = PTP_TCR_TSEVNTENA;
676
677                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679                         ptp_over_ethernet = PTP_TCR_TSIPENA;
680                         break;
681
682                 case HWTSTAMP_FILTER_NTP_ALL:
683                 case HWTSTAMP_FILTER_ALL:
684                         /* time stamp any incoming packet */
685                         config.rx_filter = HWTSTAMP_FILTER_ALL;
686                         tstamp_all = PTP_TCR_TSENALL;
687                         break;
688
689                 default:
690                         return -ERANGE;
691                 }
692         } else {
693                 switch (config.rx_filter) {
694                 case HWTSTAMP_FILTER_NONE:
695                         config.rx_filter = HWTSTAMP_FILTER_NONE;
696                         break;
697                 default:
698                         /* PTP v1, UDP, any kind of event packet */
699                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
700                         break;
701                 }
702         }
703         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
704         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
705
706         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
707                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
708         else {
709                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
710                          tstamp_all | ptp_v2 | ptp_over_ethernet |
711                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
712                          ts_master_en | snap_type_sel);
713                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
714
715                 /* program Sub Second Increment reg */
716                 stmmac_config_sub_second_increment(priv,
717                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
718                                 xmac, &sec_inc);
719                 temp = div_u64(1000000000ULL, sec_inc);
720
721                 /* Store sub second increment and flags for later use */
722                 priv->sub_second_inc = sec_inc;
723                 priv->systime_flags = value;
724
725                 /* calculate the default addend value:
726                  * formula is:
727                  * addend = (2^32)/freq_div_ratio;
728                  * where, freq_div_ratio = clk_ptp_rate/(1e9ns/sec_inc)
729                  */
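                /* Illustrative numbers (not taken from any particular board):
                 * with sec_inc = 20 ns the target PTP clock is 1e9/20 = 50 MHz;
                 * if clk_ptp_rate were 62.5 MHz then freq_div_ratio = 1.25 and
                 * addend = 2^32 / 1.25 = 0xCCCCCCCC, matching the
                 * ((1e9/sec_inc) << 32) / clk_ptp_rate computation below.
                 */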
730                 temp = (u64)(temp << 32);
731                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
732                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
733
734                 /* initialize system time */
735                 ktime_get_real_ts64(&now);
736
737                 /* lower 32 bits of tv_sec are safe until y2106 */
738                 stmmac_init_systime(priv, priv->ptpaddr,
739                                 (u32)now.tv_sec, now.tv_nsec);
740         }
741
742         return copy_to_user(ifr->ifr_data, &config,
743                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
744 }
745
746 /**
747  * stmmac_init_ptp - init PTP
748  * @priv: driver private structure
749  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
750  * This is done by looking at the HW cap. register.
751  * This function also registers the ptp driver.
752  */
753 static int stmmac_init_ptp(struct stmmac_priv *priv)
754 {
755         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
756
757         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
758                 return -EOPNOTSUPP;
759
760         priv->adv_ts = 0;
761         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
762         if (xmac && priv->dma_cap.atime_stamp)
763                 priv->adv_ts = 1;
764         /* Dwmac 3.x core with extend_desc can support adv_ts */
765         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
766                 priv->adv_ts = 1;
767
768         if (priv->dma_cap.time_stamp)
769                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
770
771         if (priv->adv_ts)
772                 netdev_info(priv->dev,
773                             "IEEE 1588-2008 Advanced Timestamp supported\n");
774
775         priv->hwts_tx_en = 0;
776         priv->hwts_rx_en = 0;
777
778         stmmac_ptp_register(priv);
779
780         return 0;
781 }
782
783 static void stmmac_release_ptp(struct stmmac_priv *priv)
784 {
785         if (priv->plat->clk_ptp_ref)
786                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
787         stmmac_ptp_unregister(priv);
788 }
789
790 /**
791  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
792  *  @priv: driver private structure
793  *  Description: It is used for configuring the flow control in all queues
794  */
795 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
796 {
797         u32 tx_cnt = priv->plat->tx_queues_to_use;
798
799         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
800                         priv->pause, tx_cnt);
801 }
802
803 /**
804  * stmmac_adjust_link - adjusts the link parameters
805  * @dev: net device structure
806  * Description: this is the helper called by the physical abstraction layer
807  * drivers to communicate the phy link status. According to the speed and
808  * duplex this driver can invoke registered glue-logic as well.
809  * It also invokes the EEE initialization because the link may come up on a
810  * different network that is EEE capable.
811  */
812 static void stmmac_adjust_link(struct net_device *dev)
813 {
814         struct stmmac_priv *priv = netdev_priv(dev);
815         struct phy_device *phydev = dev->phydev;
816         bool new_state = false;
817
818         if (!phydev)
819                 return;
820
821         mutex_lock(&priv->lock);
822
823         if (phydev->link) {
824                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
825
826                 /* Now we make sure that we can be in full duplex mode.
827                  * If not, we operate in half-duplex mode. */
828                 if (phydev->duplex != priv->oldduplex) {
829                         new_state = true;
830                         if (!phydev->duplex)
831                                 ctrl &= ~priv->hw->link.duplex;
832                         else
833                                 ctrl |= priv->hw->link.duplex;
834                         priv->oldduplex = phydev->duplex;
835                 }
836                 /* Flow Control operation */
837                 if (phydev->pause)
838                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
839
840                 if (phydev->speed != priv->speed) {
841                         new_state = true;
842                         ctrl &= ~priv->hw->link.speed_mask;
843                         switch (phydev->speed) {
844                         case SPEED_1000:
845                                 ctrl |= priv->hw->link.speed1000;
846                                 break;
847                         case SPEED_100:
848                                 ctrl |= priv->hw->link.speed100;
849                                 break;
850                         case SPEED_10:
851                                 ctrl |= priv->hw->link.speed10;
852                                 break;
853                         default:
854                                 netif_warn(priv, link, priv->dev,
855                                            "broken speed: %d\n", phydev->speed);
856                                 phydev->speed = SPEED_UNKNOWN;
857                                 break;
858                         }
859                         if (phydev->speed != SPEED_UNKNOWN)
860                                 stmmac_hw_fix_mac_speed(priv);
861                         priv->speed = phydev->speed;
862                 }
863
864                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
865
866                 if (!priv->oldlink) {
867                         new_state = true;
868                         priv->oldlink = true;
869                 }
870         } else if (priv->oldlink) {
871                 new_state = true;
872                 priv->oldlink = false;
873                 priv->speed = SPEED_UNKNOWN;
874                 priv->oldduplex = DUPLEX_UNKNOWN;
875         }
876
877         if (new_state && netif_msg_link(priv))
878                 phy_print_status(phydev);
879
880         mutex_unlock(&priv->lock);
881
882         if (phydev->is_pseudo_fixed_link)
883                 /* Stop the PHY layer from calling the adjust_link hook when
884                  * a switch is attached to the stmmac driver.
885                  */
886                 phydev->irq = PHY_IGNORE_INTERRUPT;
887         else
888                 /* At this stage, init the EEE if supported.
889                  * Never called in case of fixed_link.
890                  */
891                 priv->eee_enabled = stmmac_eee_init(priv);
892 }
893
894 /**
895  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
896  * @priv: driver private structure
897  * Description: this is to verify if the HW supports the PCS, i.e. the
898  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
899  * configured for the TBI, RTBI, or SGMII PHY interface.
900  */
901 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
902 {
903         int interface = priv->plat->interface;
904
905         if (priv->dma_cap.pcs) {
906                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
907                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
908                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
909                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
910                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
911                         priv->hw->pcs = STMMAC_PCS_RGMII;
912                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
913                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
914                         priv->hw->pcs = STMMAC_PCS_SGMII;
915                 }
916         }
917 }
918
919 /**
920  * stmmac_init_phy - PHY initialization
921  * @dev: net device structure
922  * Description: it initializes the driver's PHY state, and attaches the PHY
923  * to the mac driver.
924  *  Return value:
925  *  0 on success
926  */
927 static int stmmac_init_phy(struct net_device *dev)
928 {
929         struct stmmac_priv *priv = netdev_priv(dev);
930         u32 tx_cnt = priv->plat->tx_queues_to_use;
931         struct phy_device *phydev;
932         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
933         char bus_id[MII_BUS_ID_SIZE];
934         int interface = priv->plat->interface;
935         int max_speed = priv->plat->max_speed;
936         priv->oldlink = false;
937         priv->speed = SPEED_UNKNOWN;
938         priv->oldduplex = DUPLEX_UNKNOWN;
939
940         if (priv->plat->phy_node) {
941                 phydev = of_phy_connect(dev, priv->plat->phy_node,
942                                         &stmmac_adjust_link, 0, interface);
943         } else {
944                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
945                          priv->plat->bus_id);
946
947                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
948                          priv->plat->phy_addr);
949                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
950                            phy_id_fmt);
951
952                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
953                                      interface);
954         }
955
956         if (IS_ERR_OR_NULL(phydev)) {
957                 netdev_err(priv->dev, "Could not attach to PHY\n");
958                 if (!phydev)
959                         return -ENODEV;
960
961                 return PTR_ERR(phydev);
962         }
963
964         /* Stop Advertising 1000BASE Capability if interface is not GMII */
965         if ((interface == PHY_INTERFACE_MODE_MII) ||
966             (interface == PHY_INTERFACE_MODE_RMII) ||
967                 (max_speed < 1000 && max_speed > 0))
968                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
969                                          SUPPORTED_1000baseT_Full);
970
971         /*
972          * Half-duplex mode is not supported with multiqueue;
973          * half-duplex can only work with a single queue.
974          */
975         if (tx_cnt > 1)
976                 phydev->supported &= ~(SUPPORTED_1000baseT_Half |
977                                        SUPPORTED_100baseT_Half |
978                                        SUPPORTED_10baseT_Half);
979
980         /*
981          * Broken HW is sometimes missing the pull-up resistor on the
982          * MDIO line, which results in reads to non-existent devices returning
983          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
984          * device as well.
985          * Note: phydev->phy_id is the result of reading the UID PHY registers.
986          */
987         if (!priv->plat->phy_node && phydev->phy_id == 0) {
988                 phy_disconnect(phydev);
989                 return -ENODEV;
990         }
991
992         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
993          * subsequent PHY polling; make sure we force a link transition if
994          * we have an UP/DOWN/UP transition
995          */
996         if (phydev->is_pseudo_fixed_link)
997                 phydev->irq = PHY_POLL;
998
999         phy_attached_info(phydev);
1000         return 0;
1001 }
1002
1003 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1004 {
1005         u32 rx_cnt = priv->plat->rx_queues_to_use;
1006         void *head_rx;
1007         u32 queue;
1008
1009         /* Display RX rings */
1010         for (queue = 0; queue < rx_cnt; queue++) {
1011                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1012
1013                 pr_info("\tRX Queue %u rings\n", queue);
1014
1015                 if (priv->extend_desc)
1016                         head_rx = (void *)rx_q->dma_erx;
1017                 else
1018                         head_rx = (void *)rx_q->dma_rx;
1019
1020                 /* Display RX ring */
1021                 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1022         }
1023 }
1024
1025 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1026 {
1027         u32 tx_cnt = priv->plat->tx_queues_to_use;
1028         void *head_tx;
1029         u32 queue;
1030
1031         /* Display TX rings */
1032         for (queue = 0; queue < tx_cnt; queue++) {
1033                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1034
1035                 pr_info("\tTX Queue %d rings\n", queue);
1036
1037                 if (priv->extend_desc)
1038                         head_tx = (void *)tx_q->dma_etx;
1039                 else
1040                         head_tx = (void *)tx_q->dma_tx;
1041
1042                 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1043         }
1044 }
1045
1046 static void stmmac_display_rings(struct stmmac_priv *priv)
1047 {
1048         /* Display RX ring */
1049         stmmac_display_rx_rings(priv);
1050
1051         /* Display TX ring */
1052         stmmac_display_tx_rings(priv);
1053 }
1054
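/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: maximum transfer unit of the interface
 * @bufsize: current buffer size
 * Description: returns a buffer size large enough for the given MTU, rounded
 * up to the next supported size. For example, an MTU of 3000 selects
 * BUF_SIZE_4KiB, while the standard 1500-byte MTU keeps the 1536-byte
 * DEFAULT_BUFSIZE.
 */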
1055 static int stmmac_set_bfsize(int mtu, int bufsize)
1056 {
1057         int ret = bufsize;
1058
1059         if (mtu >= BUF_SIZE_8KiB)
1060                 ret = BUF_SIZE_16KiB;
1061         else if (mtu >= BUF_SIZE_4KiB)
1062                 ret = BUF_SIZE_8KiB;
1063         else if (mtu >= BUF_SIZE_2KiB)
1064                 ret = BUF_SIZE_4KiB;
1065         else if (mtu > DEFAULT_BUFSIZE)
1066                 ret = BUF_SIZE_2KiB;
1067         else
1068                 ret = DEFAULT_BUFSIZE;
1069
1070         return ret;
1071 }
1072
1073 /**
1074  * stmmac_clear_rx_descriptors - clear RX descriptors
1075  * @priv: driver private structure
1076  * @queue: RX queue index
1077  * Description: this function is called to clear the RX descriptors
1078  * whether basic or extended descriptors are used.
1079  */
1080 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1081 {
1082         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1083         int i;
1084
1085         /* Clear the RX descriptors */
1086         for (i = 0; i < DMA_RX_SIZE; i++)
1087                 if (priv->extend_desc)
1088                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1089                                         priv->use_riwt, priv->mode,
1090                                         (i == DMA_RX_SIZE - 1),
1091                                         priv->dma_buf_sz);
1092                 else
1093                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1094                                         priv->use_riwt, priv->mode,
1095                                         (i == DMA_RX_SIZE - 1),
1096                                         priv->dma_buf_sz);
1097 }
1098
1099 /**
1100  * stmmac_clear_tx_descriptors - clear tx descriptors
1101  * @priv: driver private structure
1102  * @queue: TX queue index.
1103  * Description: this function is called to clear the TX descriptors
1104  * whether basic or extended descriptors are used.
1105  */
1106 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1107 {
1108         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1109         int i;
1110
1111         /* Clear the TX descriptors */
1112         for (i = 0; i < DMA_TX_SIZE; i++)
1113                 if (priv->extend_desc)
1114                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1115                                         priv->mode, (i == DMA_TX_SIZE - 1));
1116                 else
1117                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1118                                         priv->mode, (i == DMA_TX_SIZE - 1));
1119 }
1120
1121 /**
1122  * stmmac_clear_descriptors - clear descriptors
1123  * @priv: driver private structure
1124  * Description: this function is called to clear the TX and RX descriptors
1125  * whether basic or extended descriptors are used.
1126  */
1127 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1128 {
1129         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1130         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1131         u32 queue;
1132
1133         /* Clear the RX descriptors */
1134         for (queue = 0; queue < rx_queue_cnt; queue++)
1135                 stmmac_clear_rx_descriptors(priv, queue);
1136
1137         /* Clear the TX descriptors */
1138         for (queue = 0; queue < tx_queue_cnt; queue++)
1139                 stmmac_clear_tx_descriptors(priv, queue);
1140 }
1141
1142 /**
1143  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1144  * @priv: driver private structure
1145  * @p: descriptor pointer
1146  * @i: descriptor index
1147  * @flags: gfp flag
1148  * @queue: RX queue index
1149  * Description: this function is called to allocate a receive buffer, perform
1150  * the DMA mapping and init the descriptor.
1151  */
1152 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1153                                   int i, gfp_t flags, u32 queue)
1154 {
1155         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1156         struct sk_buff *skb;
1157
1158         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1159         if (!skb) {
1160                 netdev_err(priv->dev,
1161                            "%s: Rx init fails; skb is NULL\n", __func__);
1162                 return -ENOMEM;
1163         }
1164         rx_q->rx_skbuff[i] = skb;
1165         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1166                                                 priv->dma_buf_sz,
1167                                                 DMA_FROM_DEVICE);
1168         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1169                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1170                 dev_kfree_skb_any(skb);
1171                 return -EINVAL;
1172         }
1173
1174         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1175
1176         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1177                 stmmac_init_desc3(priv, p);
1178
1179         return 0;
1180 }
1181
1182 /**
1183  * stmmac_free_rx_buffer - free an RX dma buffer
1184  * @priv: private structure
1185  * @queue: RX queue index
1186  * @i: buffer index.
1187  */
1188 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1189 {
1190         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1191
1192         if (rx_q->rx_skbuff[i]) {
1193                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1194                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1195                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1196         }
1197         rx_q->rx_skbuff[i] = NULL;
1198 }
1199
1200 /**
1201  * stmmac_free_tx_buffer - free a TX dma buffer
1202  * @priv: private structure
1203  * @queue: TX queue index
1204  * @i: buffer index.
1205  */
1206 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1207 {
1208         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1209
1210         if (tx_q->tx_skbuff_dma[i].buf) {
1211                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1212                         dma_unmap_page(priv->device,
1213                                        tx_q->tx_skbuff_dma[i].buf,
1214                                        tx_q->tx_skbuff_dma[i].len,
1215                                        DMA_TO_DEVICE);
1216                 else
1217                         dma_unmap_single(priv->device,
1218                                          tx_q->tx_skbuff_dma[i].buf,
1219                                          tx_q->tx_skbuff_dma[i].len,
1220                                          DMA_TO_DEVICE);
1221         }
1222
1223         if (tx_q->tx_skbuff[i]) {
1224                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1225                 tx_q->tx_skbuff[i] = NULL;
1226                 tx_q->tx_skbuff_dma[i].buf = 0;
1227                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1228         }
1229 }
1230
1231 /**
1232  * init_dma_rx_desc_rings - init the RX descriptor rings
1233  * @dev: net device structure
1234  * @flags: gfp flag.
1235  * Description: this function initializes the DMA RX descriptors
1236  * and allocates the socket buffers. It supports the chained and ring
1237  * modes.
1238  */
1239 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1240 {
1241         struct stmmac_priv *priv = netdev_priv(dev);
1242         u32 rx_count = priv->plat->rx_queues_to_use;
1243         int ret = -ENOMEM;
1244         int bfsize = 0;
1245         int queue;
1246         int i;
1247
1248         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1249         if (bfsize < 0)
1250                 bfsize = 0;
1251
1252         if (bfsize < BUF_SIZE_16KiB)
1253                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1254
1255         priv->dma_buf_sz = bfsize;
1256
1257         /* RX INITIALIZATION */
1258         netif_dbg(priv, probe, priv->dev,
1259                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1260
1261         for (queue = 0; queue < rx_count; queue++) {
1262                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1263
1264                 netif_dbg(priv, probe, priv->dev,
1265                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1266                           (u32)rx_q->dma_rx_phy);
1267
1268                 for (i = 0; i < DMA_RX_SIZE; i++) {
1269                         struct dma_desc *p;
1270
1271                         if (priv->extend_desc)
1272                                 p = &((rx_q->dma_erx + i)->basic);
1273                         else
1274                                 p = rx_q->dma_rx + i;
1275
1276                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1277                                                      queue);
1278                         if (ret)
1279                                 goto err_init_rx_buffers;
1280
1281                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1282                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1283                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1284                 }
1285
1286                 rx_q->cur_rx = 0;
1287                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1288
1289                 stmmac_clear_rx_descriptors(priv, queue);
1290
1291                 /* Setup the chained descriptor addresses */
1292                 if (priv->mode == STMMAC_CHAIN_MODE) {
1293                         if (priv->extend_desc)
1294                                 stmmac_mode_init(priv, rx_q->dma_erx,
1295                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1296                         else
1297                                 stmmac_mode_init(priv, rx_q->dma_rx,
1298                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1299                 }
1300         }
1301
1302         buf_sz = bfsize;
1303
1304         return 0;
1305
1306 err_init_rx_buffers:
1307         while (queue >= 0) {
1308                 while (--i >= 0)
1309                         stmmac_free_rx_buffer(priv, queue, i);
1310
1311                 if (queue == 0)
1312                         break;
1313
1314                 i = DMA_RX_SIZE;
1315                 queue--;
1316         }
1317
1318         return ret;
1319 }
1320
1321 /**
1322  * init_dma_tx_desc_rings - init the TX descriptor rings
1323  * @dev: net device structure.
1324  * Description: this function initializes the DMA TX descriptors
1325  * and allocates the socket buffers. It supports the chained and ring
1326  * modes.
1327  */
1328 static int init_dma_tx_desc_rings(struct net_device *dev)
1329 {
1330         struct stmmac_priv *priv = netdev_priv(dev);
1331         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1332         u32 queue;
1333         int i;
1334
1335         for (queue = 0; queue < tx_queue_cnt; queue++) {
1336                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1337
1338                 netif_dbg(priv, probe, priv->dev,
1339                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1340                          (u32)tx_q->dma_tx_phy);
1341
1342                 /* Setup the chained descriptor addresses */
1343                 if (priv->mode == STMMAC_CHAIN_MODE) {
1344                         if (priv->extend_desc)
1345                                 stmmac_mode_init(priv, tx_q->dma_etx,
1346                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1347                         else
1348                                 stmmac_mode_init(priv, tx_q->dma_tx,
1349                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1350                 }
1351
1352                 for (i = 0; i < DMA_TX_SIZE; i++) {
1353                         struct dma_desc *p;
1354                         if (priv->extend_desc)
1355                                 p = &((tx_q->dma_etx + i)->basic);
1356                         else
1357                                 p = tx_q->dma_tx + i;
1358
1359                         stmmac_clear_desc(priv, p);
1360
1361                         tx_q->tx_skbuff_dma[i].buf = 0;
1362                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1363                         tx_q->tx_skbuff_dma[i].len = 0;
1364                         tx_q->tx_skbuff_dma[i].last_segment = false;
1365                         tx_q->tx_skbuff[i] = NULL;
1366                 }
1367
1368                 tx_q->dirty_tx = 0;
1369                 tx_q->cur_tx = 0;
1370                 tx_q->mss = 0;
1371
1372                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1373         }
1374
1375         return 0;
1376 }
1377
1378 /**
1379  * init_dma_desc_rings - init the RX/TX descriptor rings
1380  * @dev: net device structure
1381  * @flags: gfp flag.
1382  * Description: this function initializes the DMA RX/TX descriptors
1383  * and allocates the socket buffers. It supports the chained and ring
1384  * modes.
1385  */
1386 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1387 {
1388         struct stmmac_priv *priv = netdev_priv(dev);
1389         int ret;
1390
1391         ret = init_dma_rx_desc_rings(dev, flags);
1392         if (ret)
1393                 return ret;
1394
1395         ret = init_dma_tx_desc_rings(dev);
1396
1397         stmmac_clear_descriptors(priv);
1398
1399         if (netif_msg_hw(priv))
1400                 stmmac_display_rings(priv);
1401
1402         return ret;
1403 }
1404
1405 /**
1406  * dma_free_rx_skbufs - free RX dma buffers
1407  * @priv: private structure
1408  * @queue: RX queue index
1409  */
1410 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1411 {
1412         int i;
1413
1414         for (i = 0; i < DMA_RX_SIZE; i++)
1415                 stmmac_free_rx_buffer(priv, queue, i);
1416 }
1417
1418 /**
1419  * dma_free_tx_skbufs - free TX dma buffers
1420  * @priv: private structure
1421  * @queue: TX queue index
1422  */
1423 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1424 {
1425         int i;
1426
1427         for (i = 0; i < DMA_TX_SIZE; i++)
1428                 stmmac_free_tx_buffer(priv, queue, i);
1429 }
1430
1431 /**
1432  * stmmac_free_tx_skbufs - free TX skb buffers
1433  * @priv: private structure
1434  */
1435 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1436 {
1437         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1438         u32 queue;
1439
1440         for (queue = 0; queue < tx_queue_cnt; queue++)
1441                 dma_free_tx_skbufs(priv, queue);
1442 }
1443
1444 /**
1445  * free_dma_rx_desc_resources - free RX dma desc resources
1446  * @priv: private structure
1447  */
1448 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1449 {
1450         u32 rx_count = priv->plat->rx_queues_to_use;
1451         u32 queue;
1452
1453         /* Free RX queue resources */
1454         for (queue = 0; queue < rx_count; queue++) {
1455                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1456
1457                 /* Release the DMA RX socket buffers */
1458                 dma_free_rx_skbufs(priv, queue);
1459
1460                 /* Free DMA regions of consistent memory previously allocated */
1461                 if (!priv->extend_desc)
1462                         dma_free_coherent(priv->device,
1463                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1464                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1465                 else
1466                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1467                                           sizeof(struct dma_extended_desc),
1468                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1469
1470                 kfree(rx_q->rx_skbuff_dma);
1471                 kfree(rx_q->rx_skbuff);
1472         }
1473 }
1474
1475 /**
1476  * free_dma_tx_desc_resources - free TX dma desc resources
1477  * @priv: private structure
1478  */
1479 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1480 {
1481         u32 tx_count = priv->plat->tx_queues_to_use;
1482         u32 queue;
1483
1484         /* Free TX queue resources */
1485         for (queue = 0; queue < tx_count; queue++) {
1486                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1487
1488                 /* Release the DMA TX socket buffers */
1489                 dma_free_tx_skbufs(priv, queue);
1490
1491                 /* Free DMA regions of consistent memory previously allocated */
1492                 if (!priv->extend_desc)
1493                         dma_free_coherent(priv->device,
1494                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1495                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1496                 else
1497                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1498                                           sizeof(struct dma_extended_desc),
1499                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1500
1501                 kfree(tx_q->tx_skbuff_dma);
1502                 kfree(tx_q->tx_skbuff);
1503         }
1504 }
1505
1506 /**
1507  * alloc_dma_rx_desc_resources - alloc RX resources.
1508  * @priv: private structure
1509  * Description: according to which descriptor can be used (extend or basic)
1510  * this function allocates the resources for TX and RX paths. In case of
1511  * reception, for example, it pre-allocated the RX socket buffer in order to
1512  * allow zero-copy mechanism.
1513  */
1514 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1515 {
1516         u32 rx_count = priv->plat->rx_queues_to_use;
1517         int ret = -ENOMEM;
1518         u32 queue;
1519
1520         /* RX queues buffers and DMA */
1521         for (queue = 0; queue < rx_count; queue++) {
1522                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1523
1524                 rx_q->queue_index = queue;
1525                 rx_q->priv_data = priv;
1526
1527                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1528                                                     sizeof(dma_addr_t),
1529                                                     GFP_KERNEL);
1530                 if (!rx_q->rx_skbuff_dma)
1531                         goto err_dma;
1532
1533                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1534                                                 sizeof(struct sk_buff *),
1535                                                 GFP_KERNEL);
1536                 if (!rx_q->rx_skbuff)
1537                         goto err_dma;
1538
1539                 if (priv->extend_desc) {
1540                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1541                                                             DMA_RX_SIZE *
1542                                                             sizeof(struct
1543                                                             dma_extended_desc),
1544                                                             &rx_q->dma_rx_phy,
1545                                                             GFP_KERNEL);
1546                         if (!rx_q->dma_erx)
1547                                 goto err_dma;
1548
1549                 } else {
1550                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1551                                                            DMA_RX_SIZE *
1552                                                            sizeof(struct
1553                                                            dma_desc),
1554                                                            &rx_q->dma_rx_phy,
1555                                                            GFP_KERNEL);
1556                         if (!rx_q->dma_rx)
1557                                 goto err_dma;
1558                 }
1559         }
1560
1561         return 0;
1562
1563 err_dma:
1564         free_dma_rx_desc_resources(priv);
1565
1566         return ret;
1567 }
1568
1569 /**
1570  * alloc_dma_tx_desc_resources - alloc TX resources.
1571  * @priv: private structure
1572  * Description: according to which descriptor can be used (extended or basic)
1573  * this function allocates the TX path resources: the descriptor DMA areas
1574  * and the arrays used to track the TX socket buffers and their DMA
1575  * mapping info.
1576  */
1577 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1578 {
1579         u32 tx_count = priv->plat->tx_queues_to_use;
1580         int ret = -ENOMEM;
1581         u32 queue;
1582
1583         /* TX queues buffers and DMA */
1584         for (queue = 0; queue < tx_count; queue++) {
1585                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1586
1587                 tx_q->queue_index = queue;
1588                 tx_q->priv_data = priv;
1589
1590                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1591                                                     sizeof(*tx_q->tx_skbuff_dma),
1592                                                     GFP_KERNEL);
1593                 if (!tx_q->tx_skbuff_dma)
1594                         goto err_dma;
1595
1596                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1597                                                 sizeof(struct sk_buff *),
1598                                                 GFP_KERNEL);
1599                 if (!tx_q->tx_skbuff)
1600                         goto err_dma;
1601
1602                 if (priv->extend_desc) {
1603                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1604                                                             DMA_TX_SIZE *
1605                                                             sizeof(struct
1606                                                             dma_extended_desc),
1607                                                             &tx_q->dma_tx_phy,
1608                                                             GFP_KERNEL);
1609                         if (!tx_q->dma_etx)
1610                                 goto err_dma;
1611                 } else {
1612                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1613                                                            DMA_TX_SIZE *
1614                                                            sizeof(struct
1615                                                                   dma_desc),
1616                                                            &tx_q->dma_tx_phy,
1617                                                            GFP_KERNEL);
1618                         if (!tx_q->dma_tx)
1619                                 goto err_dma;
1620                 }
1621         }
1622
1623         return 0;
1624
1625 err_dma:
1626         free_dma_tx_desc_resources(priv);
1627
1628         return ret;
1629 }
1630
1631 /**
1632  * alloc_dma_desc_resources - alloc TX/RX resources.
1633  * @priv: private structure
1634  * Description: according to which descriptor can be used (extended or basic)
1635  * this function allocates the resources for the TX and RX paths. In case of
1636  * reception, for example, it pre-allocates the RX socket buffers in order to
1637  * allow the zero-copy mechanism.
1638  */
1639 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1640 {
1641         /* RX Allocation */
1642         int ret = alloc_dma_rx_desc_resources(priv);
1643
1644         if (ret)
1645                 return ret;
1646
1647         ret = alloc_dma_tx_desc_resources(priv);
1648
1649         return ret;
1650 }
1651
1652 /**
1653  * free_dma_desc_resources - free dma desc resources
1654  * @priv: private structure
1655  */
1656 static void free_dma_desc_resources(struct stmmac_priv *priv)
1657 {
1658         /* Release the DMA RX socket buffers */
1659         free_dma_rx_desc_resources(priv);
1660
1661         /* Release the DMA TX socket buffers */
1662         free_dma_tx_desc_resources(priv);
1663 }
1664
1665 /**
1666  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1667  *  @priv: driver private structure
1668  *  Description: It is used for enabling the rx queues in the MAC
1669  */
1670 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1671 {
1672         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1673         int queue;
1674         u8 mode;
1675
1676         for (queue = 0; queue < rx_queues_count; queue++) {
1677                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1678                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1679         }
1680 }
1681
1682 /**
1683  * stmmac_start_rx_dma - start RX DMA channel
1684  * @priv: driver private structure
1685  * @chan: RX channel index
1686  * Description:
1687  * This starts an RX DMA channel
1688  */
1689 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1690 {
1691         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1692         stmmac_start_rx(priv, priv->ioaddr, chan);
1693 }
1694
1695 /**
1696  * stmmac_start_tx_dma - start TX DMA channel
1697  * @priv: driver private structure
1698  * @chan: TX channel index
1699  * Description:
1700  * This starts a TX DMA channel
1701  */
1702 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1703 {
1704         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1705         stmmac_start_tx(priv, priv->ioaddr, chan);
1706 }
1707
1708 /**
1709  * stmmac_stop_rx_dma - stop RX DMA channel
1710  * @priv: driver private structure
1711  * @chan: RX channel index
1712  * Description:
1713  * This stops an RX DMA channel
1714  */
1715 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1716 {
1717         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1718         stmmac_stop_rx(priv, priv->ioaddr, chan);
1719 }
1720
1721 /**
1722  * stmmac_stop_tx_dma - stop TX DMA channel
1723  * @priv: driver private structure
1724  * @chan: TX channel index
1725  * Description:
1726  * This stops a TX DMA channel
1727  */
1728 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1729 {
1730         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1731         stmmac_stop_tx(priv, priv->ioaddr, chan);
1732 }
1733
1734 /**
1735  * stmmac_start_all_dma - start all RX and TX DMA channels
1736  * @priv: driver private structure
1737  * Description:
1738  * This starts all the RX and TX DMA channels
1739  */
1740 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1741 {
1742         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1743         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1744         u32 chan = 0;
1745
1746         for (chan = 0; chan < rx_channels_count; chan++)
1747                 stmmac_start_rx_dma(priv, chan);
1748
1749         for (chan = 0; chan < tx_channels_count; chan++)
1750                 stmmac_start_tx_dma(priv, chan);
1751 }
1752
1753 /**
1754  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1755  * @priv: driver private structure
1756  * Description:
1757  * This stops the RX and TX DMA channels
1758  */
1759 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1760 {
1761         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1762         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1763         u32 chan = 0;
1764
1765         for (chan = 0; chan < rx_channels_count; chan++)
1766                 stmmac_stop_rx_dma(priv, chan);
1767
1768         for (chan = 0; chan < tx_channels_count; chan++)
1769                 stmmac_stop_tx_dma(priv, chan);
1770 }
1771
1772 /**
1773  *  stmmac_dma_operation_mode - HW DMA operation mode
1774  *  @priv: driver private structure
1775  *  Description: it is used for configuring the DMA operation mode register in
1776  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1777  */
1778 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1779 {
1780         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1781         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1782         int rxfifosz = priv->plat->rx_fifo_size;
1783         int txfifosz = priv->plat->tx_fifo_size;
1784         u32 txmode = 0;
1785         u32 rxmode = 0;
1786         u32 chan = 0;
1787         u8 qmode = 0;
1788
1789         if (rxfifosz == 0)
1790                 rxfifosz = priv->dma_cap.rx_fifo_size;
1791         if (txfifosz == 0)
1792                 txfifosz = priv->dma_cap.tx_fifo_size;
1793
1794         /* Adjust for real per queue fifo size */
1795         rxfifosz /= rx_channels_count;
1796         txfifosz /= tx_channels_count;
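             /*
              * Illustrative example (figures assumed, not from the databook):
              * with an 8 KiB RX FIFO and 4 RX queues in use, the division
              * above gives each queue 8192 / 4 = 2048 bytes of FIFO.
              */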
1797
1798         if (priv->plat->force_thresh_dma_mode) {
1799                 txmode = tc;
1800                 rxmode = tc;
1801         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1802                 /*
1803                  * In case of GMAC, SF mode can be enabled
1804                  * to perform the TX COE in HW. This depends on:
1805                  * 1) TX COE being actually supported;
1806                  * 2) there being no bugged Jumbo frame support
1807                  *    that requires the csum not to be inserted in the TDES.
1808                  */
1809                 txmode = SF_DMA_MODE;
1810                 rxmode = SF_DMA_MODE;
1811                 priv->xstats.threshold = SF_DMA_MODE;
1812         } else {
1813                 txmode = tc;
1814                 rxmode = SF_DMA_MODE;
1815         }
1816
1817         /* configure all channels */
1818         for (chan = 0; chan < rx_channels_count; chan++) {
1819                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1820
1821                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1822                                 rxfifosz, qmode);
1823                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1824                                 chan);
1825         }
1826
1827         for (chan = 0; chan < tx_channels_count; chan++) {
1828                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1829
1830                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1831                                 txfifosz, qmode);
1832         }
1833 }
1834
1835 /**
1836  * stmmac_tx_clean - to manage the transmission completion
1837  * @priv: driver private structure
      * @budget: napi budget limiting the number of packets handled
1838  * @queue: TX queue index
1839  * Description: it reclaims the transmit resources after transmission completes.
1840  */
1841 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1842 {
1843         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1844         unsigned int bytes_compl = 0, pkts_compl = 0;
1845         unsigned int entry, count = 0;
1846
1847         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1848
1849         priv->xstats.tx_clean++;
1850
1851         entry = tx_q->dirty_tx;
1852         while ((entry != tx_q->cur_tx) && (count < budget)) {
1853                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1854                 struct dma_desc *p;
1855                 int status;
1856
1857                 if (priv->extend_desc)
1858                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1859                 else
1860                         p = tx_q->dma_tx + entry;
1861
1862                 status = stmmac_tx_status(priv, &priv->dev->stats,
1863                                 &priv->xstats, p, priv->ioaddr);
1864                 /* Check if the descriptor is owned by the DMA */
1865                 if (unlikely(status & tx_dma_own))
1866                         break;
1867
1868                 count++;
1869
1870                 /* Make sure descriptor fields are read after reading
1871                  * the own bit.
1872                  */
1873                 dma_rmb();
1874
1875                 /* Just consider the last segment and ...*/
1876                 if (likely(!(status & tx_not_ls))) {
1877                         /* ... verify the status error condition */
1878                         if (unlikely(status & tx_err)) {
1879                                 priv->dev->stats.tx_errors++;
1880                         } else {
1881                                 priv->dev->stats.tx_packets++;
1882                                 priv->xstats.tx_pkt_n++;
1883                         }
1884                         stmmac_get_tx_hwtstamp(priv, p, skb);
1885                 }
1886
1887                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1888                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1889                                 dma_unmap_page(priv->device,
1890                                                tx_q->tx_skbuff_dma[entry].buf,
1891                                                tx_q->tx_skbuff_dma[entry].len,
1892                                                DMA_TO_DEVICE);
1893                         else
1894                                 dma_unmap_single(priv->device,
1895                                                  tx_q->tx_skbuff_dma[entry].buf,
1896                                                  tx_q->tx_skbuff_dma[entry].len,
1897                                                  DMA_TO_DEVICE);
1898                         tx_q->tx_skbuff_dma[entry].buf = 0;
1899                         tx_q->tx_skbuff_dma[entry].len = 0;
1900                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1901                 }
1902
1903                 stmmac_clean_desc3(priv, tx_q, p);
1904
1905                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1906                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1907
1908                 if (likely(skb != NULL)) {
1909                         pkts_compl++;
1910                         bytes_compl += skb->len;
1911                         dev_consume_skb_any(skb);
1912                         tx_q->tx_skbuff[entry] = NULL;
1913                 }
1914
1915                 stmmac_release_tx_desc(priv, p, priv->mode);
1916
1917                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1918         }
1919         tx_q->dirty_tx = entry;
1920
1921         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1922                                   pkts_compl, bytes_compl);
1923
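             /* Restart the queue if it was stopped and enough TX descriptors are now free */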
1924         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1925                                                                 queue))) &&
1926             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1927
1928                 netif_dbg(priv, tx_done, priv->dev,
1929                           "%s: restart transmit\n", __func__);
1930                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1931         }
1932
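             /* If EEE is enabled and the TX path left LPI, try to re-enter low power mode */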
1933         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1934                 stmmac_enable_eee_mode(priv);
1935                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1936         }
1937
1938         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1939
1940         return count;
1941 }
1942
1943 /**
1944  * stmmac_tx_err - to manage the tx error
1945  * @priv: driver private structure
1946  * @chan: channel index
1947  * Description: it cleans the descriptors and restarts the transmission
1948  * in case of transmission errors.
1949  */
1950 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1951 {
1952         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1953         int i;
1954
1955         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1956
1957         stmmac_stop_tx_dma(priv, chan);
1958         dma_free_tx_skbufs(priv, chan);
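             /* Re-initialize every TX descriptor before restarting the channel */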
1959         for (i = 0; i < DMA_TX_SIZE; i++)
1960                 if (priv->extend_desc)
1961                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1962                                         priv->mode, (i == DMA_TX_SIZE - 1));
1963                 else
1964                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1965                                         priv->mode, (i == DMA_TX_SIZE - 1));
1966         tx_q->dirty_tx = 0;
1967         tx_q->cur_tx = 0;
1968         tx_q->mss = 0;
1969         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1970         stmmac_start_tx_dma(priv, chan);
1971
1972         priv->dev->stats.tx_errors++;
1973         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1974 }
1975
1976 /**
1977  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1978  *  @priv: driver private structure
1979  *  @txmode: TX operating mode
1980  *  @rxmode: RX operating mode
1981  *  @chan: channel index
1982  *  Description: it is used for configuring of the DMA operation mode in
1983  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1984  *  mode.
1985  */
1986 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1987                                           u32 rxmode, u32 chan)
1988 {
1989         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1990         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1991         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1992         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1993         int rxfifosz = priv->plat->rx_fifo_size;
1994         int txfifosz = priv->plat->tx_fifo_size;
1995
1996         if (rxfifosz == 0)
1997                 rxfifosz = priv->dma_cap.rx_fifo_size;
1998         if (txfifosz == 0)
1999                 txfifosz = priv->dma_cap.tx_fifo_size;
2000
2001         /* Adjust for real per queue fifo size */
2002         rxfifosz /= rx_channels_count;
2003         txfifosz /= tx_channels_count;
2004
2005         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2006         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2007 }
2008
2009 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2010 {
2011         int ret;
2012
2013         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2014                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2015         if (ret && (ret != -EINVAL)) {
2016                 stmmac_global_err(priv);
2017                 return true;
2018         }
2019
2020         return false;
2021 }
2022
2023 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2024 {
2025         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2026                                                  &priv->xstats, chan);
2027         struct stmmac_channel *ch = &priv->channel[chan];
2028         bool needs_work = false;
2029
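             /* Only keep the RX/TX work bits that this channel actually handles */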
2030         if ((status & handle_rx) && ch->has_rx) {
2031                 needs_work = true;
2032         } else {
2033                 status &= ~handle_rx;
2034         }
2035
2036         if ((status & handle_tx) && ch->has_tx) {
2037                 needs_work = true;
2038         } else {
2039                 status &= ~handle_tx;
2040         }
2041
2042         if (needs_work && napi_schedule_prep(&ch->napi)) {
2043                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2044                 __napi_schedule(&ch->napi);
2045         }
2046
2047         return status;
2048 }
2049
2050 /**
2051  * stmmac_dma_interrupt - DMA ISR
2052  * @priv: driver private structure
2053  * Description: this is the DMA ISR. It is called by the main ISR.
2054  * It calls the dwmac dma routine and schedules the poll method when there
2055  * is work to be done.
2056  */
2057 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2058 {
2059         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2060         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2061         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2062                                 tx_channel_count : rx_channel_count;
2063         u32 chan;
2064         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2065
2066         /* Make sure we never check beyond our status buffer. */
2067         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2068                 channels_to_check = ARRAY_SIZE(status);
2069
2070         for (chan = 0; chan < channels_to_check; chan++)
2071                 status[chan] = stmmac_napi_check(priv, chan);
2072
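             /*
              * TX-side error handling: on a threshold-related hard error the DMA
              * threshold is bumped in steps of 64 while it is still <= 256
              * (e.g. 64 -> 128 -> 192, illustrative values); on a plain hard
              * error the channel is fully restarted via stmmac_tx_err().
              */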
2073         for (chan = 0; chan < tx_channel_count; chan++) {
2074                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2075                         /* Try to bump up the dma threshold on this failure */
2076                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2077                             (tc <= 256)) {
2078                                 tc += 64;
2079                                 if (priv->plat->force_thresh_dma_mode)
2080                                         stmmac_set_dma_operation_mode(priv,
2081                                                                       tc,
2082                                                                       tc,
2083                                                                       chan);
2084                                 else
2085                                         stmmac_set_dma_operation_mode(priv,
2086                                                                     tc,
2087                                                                     SF_DMA_MODE,
2088                                                                     chan);
2089                                 priv->xstats.threshold = tc;
2090                         }
2091                 } else if (unlikely(status[chan] == tx_hard_error)) {
2092                         stmmac_tx_err(priv, chan);
2093                 }
2094         }
2095 }
2096
2097 /**
2098  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2099  * @priv: driver private structure
2100  * Description: this masks the MMC irq; the counters are managed in SW.
2101  */
2102 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2103 {
2104         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2105                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2106
2107         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2108
2109         if (priv->dma_cap.rmon) {
2110                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2111                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2112         } else
2113                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2114 }
2115
2116 /**
2117  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2118  * @priv: driver private structure
2119  * Description:
2120  *  newer GMAC chip generations have a register that indicates the
2121  *  presence of the optional features/functions.
2122  *  It can also be used to override the values passed through the
2123  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2124  */
2125 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2126 {
2127         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2128 }
2129
2130 /**
2131  * stmmac_check_ether_addr - check if the MAC addr is valid
2132  * @priv: driver private structure
2133  * Description:
2134  * it verifies that the MAC address is valid; if it is not, the address is
2135  * read from the HW and, failing that, a random MAC address is generated.
2136  */
2137 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2138 {
2139         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2140                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2141                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2142                         eth_hw_addr_random(priv->dev);
2143                 netdev_info(priv->dev, "device MAC address %pM\n",
2144                             priv->dev->dev_addr);
2145         }
2146 }
2147
2148 /**
2149  * stmmac_init_dma_engine - DMA init.
2150  * @priv: driver private structure
2151  * Description:
2152  * It inits the DMA by invoking the specific MAC/GMAC callback.
2153  * Some DMA parameters can be passed from the platform;
2154  * if they are not passed, a default is kept for the MAC or GMAC.
2155  */
2156 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2157 {
2158         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2159         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2160         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2161         struct stmmac_rx_queue *rx_q;
2162         struct stmmac_tx_queue *tx_q;
2163         u32 chan = 0;
2164         int atds = 0;
2165         int ret = 0;
2166
2167         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2168                 dev_err(priv->device, "Invalid DMA configuration\n");
2169                 return -EINVAL;
2170         }
2171
2172         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2173                 atds = 1;
2174
2175         ret = stmmac_reset(priv, priv->ioaddr);
2176         if (ret) {
2177                 dev_err(priv->device, "Failed to reset the dma\n");
2178                 return ret;
2179         }
2180
2181         /* DMA Configuration */
2182         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2183
2184         if (priv->plat->axi)
2185                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2186
2187         /* DMA CSR Channel configuration */
2188         for (chan = 0; chan < dma_csr_ch; chan++)
2189                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2190
2191         /* DMA RX Channel Configuration */
2192         for (chan = 0; chan < rx_channels_count; chan++) {
2193                 rx_q = &priv->rx_queue[chan];
2194
2195                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2196                                     rx_q->dma_rx_phy, chan);
2197
2198                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2199                             (DMA_RX_SIZE * sizeof(struct dma_desc));
2200                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2201                                        rx_q->rx_tail_addr, chan);
2202         }
2203
2204         /* DMA TX Channel Configuration */
2205         for (chan = 0; chan < tx_channels_count; chan++) {
2206                 tx_q = &priv->tx_queue[chan];
2207
2208                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2209                                     tx_q->dma_tx_phy, chan);
2210
2211                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2212                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2213                                        tx_q->tx_tail_addr, chan);
2214         }
2215
2216         return ret;
2217 }
2218
2219 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2220 {
2221         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2222
2223         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2224 }
2225
2226 /**
2227  * stmmac_tx_timer - mitigation sw timer for tx.
2228  * @t: pointer to the timer_list structure
2229  * Description:
2230  * This is the timer handler that schedules the NAPI poll to run stmmac_tx_clean.
2231  */
2232 static void stmmac_tx_timer(struct timer_list *t)
2233 {
2234         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2235         struct stmmac_priv *priv = tx_q->priv_data;
2236         struct stmmac_channel *ch;
2237
2238         ch = &priv->channel[tx_q->queue_index];
2239
2240         if (likely(napi_schedule_prep(&ch->napi)))
2241                 __napi_schedule(&ch->napi);
2242 }
2243
2244 /**
2245  * stmmac_init_tx_coalesce - init tx mitigation options.
2246  * @priv: driver private structure
2247  * Description:
2248  * This inits the transmit coalesce parameters: i.e. timer rate,
2249  * timer handler and default threshold used for enabling the
2250  * interrupt on completion bit.
2251  */
2252 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2253 {
2254         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2255         u32 chan;
2256
2257         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2258         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2259
2260         for (chan = 0; chan < tx_channel_count; chan++) {
2261                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2262
2263                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2264         }
2265 }
2266
2267 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2268 {
2269         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2270         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2271         u32 chan;
2272
2273         /* set TX ring length */
2274         for (chan = 0; chan < tx_channels_count; chan++)
2275                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2276                                 (DMA_TX_SIZE - 1), chan);
2277
2278         /* set RX ring length */
2279         for (chan = 0; chan < rx_channels_count; chan++)
2280                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2281                                 (DMA_RX_SIZE - 1), chan);
2282 }
2283
2284 /**
2285  *  stmmac_set_tx_queue_weight - Set TX queue weight
2286  *  @priv: driver private structure
2287  *  Description: It is used for setting the TX queue weights
2288  */
2289 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2290 {
2291         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2292         u32 weight;
2293         u32 queue;
2294
2295         for (queue = 0; queue < tx_queues_count; queue++) {
2296                 weight = priv->plat->tx_queues_cfg[queue].weight;
2297                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2298         }
2299 }
2300
2301 /**
2302  *  stmmac_configure_cbs - Configure CBS in TX queue
2303  *  @priv: driver private structure
2304  *  Description: It is used for configuring CBS in AVB TX queues
2305  */
2306 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2307 {
2308         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2309         u32 mode_to_use;
2310         u32 queue;
2311
2312         /* queue 0 is reserved for legacy traffic */
2313         for (queue = 1; queue < tx_queues_count; queue++) {
2314                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2315                 if (mode_to_use == MTL_QUEUE_DCB)
2316                         continue;
2317
2318                 stmmac_config_cbs(priv, priv->hw,
2319                                 priv->plat->tx_queues_cfg[queue].send_slope,
2320                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2321                                 priv->plat->tx_queues_cfg[queue].high_credit,
2322                                 priv->plat->tx_queues_cfg[queue].low_credit,
2323                                 queue);
2324         }
2325 }
2326
2327 /**
2328  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2329  *  @priv: driver private structure
2330  *  Description: It is used for mapping RX queues to RX dma channels
2331  */
2332 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2333 {
2334         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2335         u32 queue;
2336         u32 chan;
2337
2338         for (queue = 0; queue < rx_queues_count; queue++) {
2339                 chan = priv->plat->rx_queues_cfg[queue].chan;
2340                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2341         }
2342 }
2343
2344 /**
2345  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2346  *  @priv: driver private structure
2347  *  Description: It is used for configuring the RX Queue Priority
2348  */
2349 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2350 {
2351         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2352         u32 queue;
2353         u32 prio;
2354
2355         for (queue = 0; queue < rx_queues_count; queue++) {
2356                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2357                         continue;
2358
2359                 prio = priv->plat->rx_queues_cfg[queue].prio;
2360                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2361         }
2362 }
2363
2364 /**
2365  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2366  *  @priv: driver private structure
2367  *  Description: It is used for configuring the TX Queue Priority
2368  */
2369 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2370 {
2371         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2372         u32 queue;
2373         u32 prio;
2374
2375         for (queue = 0; queue < tx_queues_count; queue++) {
2376                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2377                         continue;
2378
2379                 prio = priv->plat->tx_queues_cfg[queue].prio;
2380                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2381         }
2382 }
2383
2384 /**
2385  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2386  *  @priv: driver private structure
2387  *  Description: It is used for configuring the RX queue routing
2388  */
2389 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2390 {
2391         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2392         u32 queue;
2393         u8 packet;
2394
2395         for (queue = 0; queue < rx_queues_count; queue++) {
2396                 /* no specific packet type routing specified for the queue */
2397                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2398                         continue;
2399
2400                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2401                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2402         }
2403 }
2404
2405 /**
2406  *  stmmac_mtl_configuration - Configure MTL
2407  *  @priv: driver private structure
2408  *  Description: It is used for configuring the MTL
2409  */
2410 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2411 {
2412         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2413         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2414
2415         if (tx_queues_count > 1)
2416                 stmmac_set_tx_queue_weight(priv);
2417
2418         /* Configure MTL RX algorithms */
2419         if (rx_queues_count > 1)
2420                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2421                                 priv->plat->rx_sched_algorithm);
2422
2423         /* Configure MTL TX algorithms */
2424         if (tx_queues_count > 1)
2425                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2426                                 priv->plat->tx_sched_algorithm);
2427
2428         /* Configure CBS in AVB TX queues */
2429         if (tx_queues_count > 1)
2430                 stmmac_configure_cbs(priv);
2431
2432         /* Map RX MTL to DMA channels */
2433         stmmac_rx_queue_dma_chan_map(priv);
2434
2435         /* Enable MAC RX Queues */
2436         stmmac_mac_enable_rx_queues(priv);
2437
2438         /* Set RX priorities */
2439         if (rx_queues_count > 1)
2440                 stmmac_mac_config_rx_queues_prio(priv);
2441
2442         /* Set TX priorities */
2443         if (tx_queues_count > 1)
2444                 stmmac_mac_config_tx_queues_prio(priv);
2445
2446         /* Set RX routing */
2447         if (rx_queues_count > 1)
2448                 stmmac_mac_config_rx_queues_routing(priv);
2449 }
2450
2451 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2452 {
2453         if (priv->dma_cap.asp) {
2454                 netdev_info(priv->dev, "Enabling Safety Features\n");
2455                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2456         } else {
2457                 netdev_info(priv->dev, "No Safety Features support found\n");
2458         }
2459 }
2460
2461 /**
2462  * stmmac_hw_setup - setup mac in a usable state.
2463  *  @dev : pointer to the device structure.
      *  @init_ptp: initialize the PTP clock and subsystem when true.
2464  *  Description:
2465  *  this is the main function to setup the HW in a usable state: the dma
2466  *  engine is reset, the core registers are configured (e.g. AXI,
2467  *  Checksum features, timers) and the DMA is made ready to start
2468  *  receiving and transmitting.
2469  *  Return value:
2470  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2471  *  file on failure.
2472  */
2473 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2474 {
2475         struct stmmac_priv *priv = netdev_priv(dev);
2476         u32 rx_cnt = priv->plat->rx_queues_to_use;
2477         u32 tx_cnt = priv->plat->tx_queues_to_use;
2478         u32 chan;
2479         int ret;
2480
2481         /* DMA initialization and SW reset */
2482         ret = stmmac_init_dma_engine(priv);
2483         if (ret < 0) {
2484                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2485                            __func__);
2486                 return ret;
2487         }
2488
2489         /* Copy the MAC addr into the HW  */
2490         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2491
2492         /* PS and related bits will be programmed according to the speed */
2493         if (priv->hw->pcs) {
2494                 int speed = priv->plat->mac_port_sel_speed;
2495
2496                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2497                     (speed == SPEED_1000)) {
2498                         priv->hw->ps = speed;
2499                 } else {
2500                         dev_warn(priv->device, "invalid port speed\n");
2501                         priv->hw->ps = 0;
2502                 }
2503         }
2504
2505         /* Initialize the MAC Core */
2506         stmmac_core_init(priv, priv->hw, dev);
2507
2508         /* Initialize MTL*/
2509         stmmac_mtl_configuration(priv);
2510
2511         /* Initialize Safety Features */
2512         stmmac_safety_feat_configuration(priv);
2513
2514         ret = stmmac_rx_ipc(priv, priv->hw);
2515         if (!ret) {
2516                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2517                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2518                 priv->hw->rx_csum = 0;
2519         }
2520
2521         /* Enable the MAC Rx/Tx */
2522         stmmac_mac_set(priv, priv->ioaddr, true);
2523
2524         /* Set the HW DMA mode and the COE */
2525         stmmac_dma_operation_mode(priv);
2526
2527         stmmac_mmc_setup(priv);
2528
2529         if (init_ptp) {
2530                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2531                 if (ret < 0)
2532                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2533
2534                 ret = stmmac_init_ptp(priv);
2535                 if (ret == -EOPNOTSUPP)
2536                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2537                 else if (ret)
2538                         netdev_warn(priv->dev, "PTP init failed\n");
2539         }
2540
2541         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2542
2543         if (priv->use_riwt) {
2544                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2545                 if (!ret)
2546                         priv->rx_riwt = MAX_DMA_RIWT;
2547         }
2548
2549         if (priv->hw->pcs)
2550                 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2551
2552         /* set TX and RX rings length */
2553         stmmac_set_rings_length(priv);
2554
2555         /* Enable TSO */
2556         if (priv->tso) {
2557                 for (chan = 0; chan < tx_cnt; chan++)
2558                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2559         }
2560
2561         /* Start the ball rolling... */
2562         stmmac_start_all_dma(priv);
2563
2564         return 0;
2565 }
2566
2567 static void stmmac_hw_teardown(struct net_device *dev)
2568 {
2569         struct stmmac_priv *priv = netdev_priv(dev);
2570
2571         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2572 }
2573
2574 /**
2575  *  stmmac_open - open entry point of the driver
2576  *  @dev : pointer to the device structure.
2577  *  Description:
2578  *  This function is the open entry point of the driver.
2579  *  Return value:
2580  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2581  *  file on failure.
2582  */
2583 static int stmmac_open(struct net_device *dev)
2584 {
2585         struct stmmac_priv *priv = netdev_priv(dev);
2586         u32 chan;
2587         int ret;
2588
2589         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2590             priv->hw->pcs != STMMAC_PCS_TBI &&
2591             priv->hw->pcs != STMMAC_PCS_RTBI) {
2592                 ret = stmmac_init_phy(dev);
2593                 if (ret) {
2594                         netdev_err(priv->dev,
2595                                    "%s: Cannot attach to PHY (error: %d)\n",
2596                                    __func__, ret);
2597                         return ret;
2598                 }
2599         }
2600
2601         /* Extra statistics */
2602         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2603         priv->xstats.threshold = tc;
2604
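             /* Derive the DMA buffer size from the buf_sz module parameter (aligned) */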
2605         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2606         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2607
2608         ret = alloc_dma_desc_resources(priv);
2609         if (ret < 0) {
2610                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2611                            __func__);
2612                 goto dma_desc_error;
2613         }
2614
2615         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2616         if (ret < 0) {
2617                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2618                            __func__);
2619                 goto init_error;
2620         }
2621
2622         ret = stmmac_hw_setup(dev, true);
2623         if (ret < 0) {
2624                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2625                 goto init_error;
2626         }
2627
2628         stmmac_init_tx_coalesce(priv);
2629
2630         if (dev->phydev)
2631                 phy_start(dev->phydev);
2632
2633         /* Request the IRQ lines */
2634         ret = request_irq(dev->irq, stmmac_interrupt,
2635                           IRQF_SHARED, dev->name, dev);
2636         if (unlikely(ret < 0)) {
2637                 netdev_err(priv->dev,
2638                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2639                            __func__, dev->irq, ret);
2640                 goto irq_error;
2641         }
2642
2643         /* Request the Wake IRQ in case another line is used for WoL */
2644         if (priv->wol_irq != dev->irq) {
2645                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2646                                   IRQF_SHARED, dev->name, dev);
2647                 if (unlikely(ret < 0)) {
2648                         netdev_err(priv->dev,
2649                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2650                                    __func__, priv->wol_irq, ret);
2651                         goto wolirq_error;
2652                 }
2653         }
2654
2655         /* Request the LPI IRQ when a separate line is used for it */
2656         if (priv->lpi_irq > 0) {
2657                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2658                                   dev->name, dev);
2659                 if (unlikely(ret < 0)) {
2660                         netdev_err(priv->dev,
2661                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2662                                    __func__, priv->lpi_irq, ret);
2663                         goto lpiirq_error;
2664                 }
2665         }
2666
2667         stmmac_enable_all_queues(priv);
2668         netif_tx_start_all_queues(priv->dev);
2669
2670         return 0;
2671
2672 lpiirq_error:
2673         if (priv->wol_irq != dev->irq)
2674                 free_irq(priv->wol_irq, dev);
2675 wolirq_error:
2676         free_irq(dev->irq, dev);
2677 irq_error:
2678         if (dev->phydev)
2679                 phy_stop(dev->phydev);
2680
2681         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2682                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2683
2684         stmmac_hw_teardown(dev);
2685 init_error:
2686         free_dma_desc_resources(priv);
2687 dma_desc_error:
2688         if (dev->phydev)
2689                 phy_disconnect(dev->phydev);
2690
2691         return ret;
2692 }
2693
2694 /**
2695  *  stmmac_release - close entry point of the driver
2696  *  @dev : device pointer.
2697  *  Description:
2698  *  This is the stop entry point of the driver.
2699  */
2700 static int stmmac_release(struct net_device *dev)
2701 {
2702         struct stmmac_priv *priv = netdev_priv(dev);
2703         u32 chan;
2704
2705         /* Stop and disconnect the PHY */
2706         if (dev->phydev) {
2707                 phy_stop(dev->phydev);
2708                 phy_disconnect(dev->phydev);
2709         }
2710
2711         stmmac_disable_all_queues(priv);
2712
2713         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2714                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2715
2716         /* Free the IRQ lines */
2717         free_irq(dev->irq, dev);
2718         if (priv->wol_irq != dev->irq)
2719                 free_irq(priv->wol_irq, dev);
2720         if (priv->lpi_irq > 0)
2721                 free_irq(priv->lpi_irq, dev);
2722
2723         if (priv->eee_enabled) {
2724                 priv->tx_path_in_lpi_mode = false;
2725                 del_timer_sync(&priv->eee_ctrl_timer);
2726         }
2727
2728         /* Stop TX/RX DMA and clear the descriptors */
2729         stmmac_stop_all_dma(priv);
2730
2731         /* Release and free the Rx/Tx resources */
2732         free_dma_desc_resources(priv);
2733
2734         /* Disable the MAC Rx/Tx */
2735         stmmac_mac_set(priv, priv->ioaddr, false);
2736
2737         netif_carrier_off(dev);
2738
2739         stmmac_release_ptp(priv);
2740
2741         return 0;
2742 }
2743
2744 /**
2745  *  stmmac_tso_allocator - fill TSO descriptors for a buffer
2746  *  @priv: driver private structure
2747  *  @des: buffer start address
2748  *  @total_len: total length to fill in descriptors
2749  *  @last_segment: condition for the last descriptor
2750  *  @queue: TX queue index
2751  *  Description:
2752  *  This function fills the descriptors and requests new descriptors
2753  *  according to the buffer length to fill.
2754  */
2755 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2756                                  int total_len, bool last_segment, u32 queue)
2757 {
2758         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2759         struct dma_desc *desc;
2760         u32 buff_size;
2761         int tmp_len;
2762
2763         tmp_len = total_len;
2764
2765         while (tmp_len > 0) {
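             /*
              * Split the buffer across as many descriptors as needed, at most
              * TSO_MAX_BUFF_SIZE bytes each. Illustrative example: with
              * TSO_MAX_BUFF_SIZE = 16383, a 40000-byte payload would use three
              * descriptors carrying 16383, 16383 and 7234 bytes.
              */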
2766                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2767                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2768                 desc = tx_q->dma_tx + tx_q->cur_tx;
2769
2770                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2771                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2772                             TSO_MAX_BUFF_SIZE : tmp_len;
2773
2774                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2775                                 0, 1,
2776                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2777                                 0, 0);
2778
2779                 tmp_len -= TSO_MAX_BUFF_SIZE;
2780         }
2781 }
2782
2783 /**
2784  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2785  *  @skb : the socket buffer
2786  *  @dev : device pointer
2787  *  Description: this is the transmit function that is called on TSO frames
2788  *  (support available on GMAC4 and newer chips).
2789  *  The diagram below shows the ring programming in case of TSO frames:
2790  *
2791  *  First Descriptor
2792  *   --------
2793  *   | DES0 |---> buffer1 = L2/L3/L4 header
2794  *   | DES1 |---> TCP Payload (can continue on next descr...)
2795  *   | DES2 |---> buffer 1 and 2 len
2796  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2797  *   --------
2798  *      |
2799  *     ...
2800  *      |
2801  *   --------
2802  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2803  *   | DES1 | --|
2804  *   | DES2 | --> buffer 1 and 2 len
2805  *   | DES3 |
2806  *   --------
2807  *
2808  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be reprogrammed per frame.
2809  */
2810 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2811 {
2812         struct dma_desc *desc, *first, *mss_desc = NULL;
2813         struct stmmac_priv *priv = netdev_priv(dev);
2814         int nfrags = skb_shinfo(skb)->nr_frags;
2815         u32 queue = skb_get_queue_mapping(skb);
2816         unsigned int first_entry, des;
2817         struct stmmac_tx_queue *tx_q;
2818         int tmp_pay_len = 0;
2819         u32 pay_len, mss;
2820         u8 proto_hdr_len;
2821         int i;
2822
2823         tx_q = &priv->tx_queue[queue];
2824
2825         /* Compute header lengths */
2826         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2827
2828         /* Descriptor availability checked against this threshold should be safe enough */
2829         if (unlikely(stmmac_tx_avail(priv, queue) <
2830                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2831                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2832                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2833                                                                 queue));
2834                         /* This is a hard error, log it. */
2835                         netdev_err(priv->dev,
2836                                    "%s: Tx Ring full when queue awake\n",
2837                                    __func__);
2838                 }
2839                 return NETDEV_TX_BUSY;
2840         }
2841
2842         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2843
2844         mss = skb_shinfo(skb)->gso_size;
2845
2846         /* set new MSS value if needed */
2847         if (mss != tx_q->mss) {
2848                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2849                 stmmac_set_mss(priv, mss_desc, mss);
2850                 tx_q->mss = mss;
2851                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2852                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2853         }
2854
2855         if (netif_msg_tx_queued(priv)) {
2856                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2857                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2858                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2859                         skb->data_len);
2860         }
2861
2862         first_entry = tx_q->cur_tx;
2863         WARN_ON(tx_q->tx_skbuff[first_entry]);
2864
2865         desc = tx_q->dma_tx + first_entry;
2866         first = desc;
2867
2868         /* first descriptor: fill Headers on Buf1 */
2869         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2870                              DMA_TO_DEVICE);
2871         if (dma_mapping_error(priv->device, des))
2872                 goto dma_map_err;
2873
2874         tx_q->tx_skbuff_dma[first_entry].buf = des;
2875         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2876
2877         first->des0 = cpu_to_le32(des);
2878
2879         /* Fill start of payload in buff2 of first descriptor */
2880         if (pay_len)
2881                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2882
2883         /* If needed take extra descriptors to fill the remaining payload */
2884         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2885
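        /* The first descriptor can carry at most TSO_MAX_BUFF_SIZE bytes of
         * payload; whatever remains (tmp_pay_len) is spread over additional
         * descriptors by stmmac_tso_allocator().
         */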
2886         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2887
2888         /* Prepare fragments */
2889         for (i = 0; i < nfrags; i++) {
2890                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2891
2892                 des = skb_frag_dma_map(priv->device, frag, 0,
2893                                        skb_frag_size(frag),
2894                                        DMA_TO_DEVICE);
2895                 if (dma_mapping_error(priv->device, des))
2896                         goto dma_map_err;
2897
2898                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2899                                      (i == nfrags - 1), queue);
2900
2901                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2902                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2903                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2904         }
2905
2906         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2907
2908         /* Only the last descriptor gets to point to the skb. */
2909         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2910
2911         /* We've used all descriptors we need for this skb, however,
2912          * advance cur_tx so that it references a fresh descriptor.
2913          * ndo_start_xmit will fill this descriptor the next time it's
2914          * called and stmmac_tx_clean may clean up to this descriptor.
2915          */
2916         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2917
2918         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2919                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2920                           __func__);
2921                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2922         }
2923
2924         dev->stats.tx_bytes += skb->len;
2925         priv->xstats.tx_tso_frames++;
2926         priv->xstats.tx_tso_nfrags += nfrags;
2927
2928         /* Manage tx mitigation */
2929         tx_q->tx_count_frames += nfrags + 1;
2930         if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2931             !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2932             (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2933             priv->hwts_tx_en)) {
2934                 stmmac_tx_timer_arm(priv, queue);
2935         } else {
2936                 tx_q->tx_count_frames = 0;
2937                 stmmac_set_tx_ic(priv, desc);
2938                 priv->xstats.tx_set_ic_bit++;
2939         }
2940
2941         skb_tx_timestamp(skb);
2942
2943         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2944                      priv->hwts_tx_en)) {
2945                 /* declare that device is doing timestamping */
2946                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2947                 stmmac_enable_tx_timestamp(priv, first);
2948         }
2949
2950         /* Complete the first descriptor before granting the DMA */
2951         stmmac_prepare_tso_tx_desc(priv, first, 1,
2952                         proto_hdr_len,
2953                         pay_len,
2954                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2955                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2956
2957         /* If context desc is used to change MSS */
2958         if (mss_desc) {
2959                 /* Make sure that the first descriptor has been completely
2960                  * written, including its OWN bit. The MSS context descriptor
2961                  * sits before the first descriptor in the ring, so its OWN
2962                  * bit must be the last thing written.
2963                  */
2964                 dma_wmb();
2965                 stmmac_set_tx_owner(priv, mss_desc);
2966         }
2967
2968         /* The OWN bit must be the last thing set when preparing the
2969          * descriptor, and a barrier is needed to make sure that everything
2970          * is coherent before granting the descriptor to the DMA engine.
2971          */
2972         wmb();
2973
2974         if (netif_msg_pktdata(priv)) {
2975                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2976                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2977                         tx_q->cur_tx, first, nfrags);
2978
2979                 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2980
2981                 pr_info(">>> frame to be transmitted: ");
2982                 print_pkt(skb->data, skb_headlen(skb));
2983         }
2984
2985         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2986
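        /* Move the queue tail pointer to the first unused descriptor so the
         * DMA engine knows how far it may process the ring.
         */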
2987         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2988         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2989         stmmac_tx_timer_arm(priv, queue);
2990
2991         return NETDEV_TX_OK;
2992
2993 dma_map_err:
2994         dev_err(priv->device, "Tx dma map failed\n");
2995         dev_kfree_skb(skb);
2996         priv->dev->stats.tx_dropped++;
2997         return NETDEV_TX_OK;
2998 }
2999
3000 /**
3001  *  stmmac_xmit - Tx entry point of the driver
3002  *  @skb : the socket buffer
3003  *  @dev : device pointer
3004  *  Description : this is the tx entry point of the driver.
3005  *  It programs the chain or the ring and supports oversized frames
3006  *  and the SG feature.
3007  */
3008 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3009 {
3010         struct stmmac_priv *priv = netdev_priv(dev);
3011         unsigned int nopaged_len = skb_headlen(skb);
3012         int i, csum_insertion = 0, is_jumbo = 0;
3013         u32 queue = skb_get_queue_mapping(skb);
3014         int nfrags = skb_shinfo(skb)->nr_frags;
3015         int entry;
3016         unsigned int first_entry;
3017         struct dma_desc *desc, *first;
3018         struct stmmac_tx_queue *tx_q;
3019         unsigned int enh_desc;
3020         unsigned int des;
3021
3022         tx_q = &priv->tx_queue[queue];
3023
3024         if (priv->tx_path_in_lpi_mode)
3025                 stmmac_disable_eee_mode(priv);
3026
3027         /* Manage oversized TCP frames for GMAC4 device */
3028         if (skb_is_gso(skb) && priv->tso) {
3029                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3030                         return stmmac_tso_xmit(skb, dev);
3031         }
3032
3033         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3034                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3035                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3036                                                                 queue));
3037                         /* This is a hard error, log it. */
3038                         netdev_err(priv->dev,
3039                                    "%s: Tx Ring full when queue awake\n",
3040                                    __func__);
3041                 }
3042                 return NETDEV_TX_BUSY;
3043         }
3044
3045         entry = tx_q->cur_tx;
3046         first_entry = entry;
3047         WARN_ON(tx_q->tx_skbuff[first_entry]);
3048
3049         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3050
3051         if (likely(priv->extend_desc))
3052                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3053         else
3054                 desc = tx_q->dma_tx + entry;
3055
3056         first = desc;
3057
3058         enh_desc = priv->plat->enh_desc;
3059         /* To program the descriptors according to the size of the frame */
3060         if (enh_desc)
3061                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3062
3063         if (unlikely(is_jumbo)) {
3064                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3065                 if (unlikely(entry < 0) && (entry != -EINVAL))
3066                         goto dma_map_err;
3067         }
3068
3069         for (i = 0; i < nfrags; i++) {
3070                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3071                 int len = skb_frag_size(frag);
3072                 bool last_segment = (i == (nfrags - 1));
3073
3074                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3075                 WARN_ON(tx_q->tx_skbuff[entry]);
3076
3077                 if (likely(priv->extend_desc))
3078                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3079                 else
3080                         desc = tx_q->dma_tx + entry;
3081
3082                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3083                                        DMA_TO_DEVICE);
3084                 if (dma_mapping_error(priv->device, des))
3085                         goto dma_map_err; /* should reuse desc w/o issues */
3086
3087                 tx_q->tx_skbuff_dma[entry].buf = des;
3088
3089                 stmmac_set_desc_addr(priv, desc, des);
3090
3091                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3092                 tx_q->tx_skbuff_dma[entry].len = len;
3093                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3094
3095                 /* Prepare the descriptor and set the own bit too */
3096                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3097                                 priv->mode, 1, last_segment, skb->len);
3098         }
3099
3100         /* Only the last descriptor gets to point to the skb. */
3101         tx_q->tx_skbuff[entry] = skb;
3102
3103         /* We've used all descriptors we need for this skb, however,
3104          * advance cur_tx so that it references a fresh descriptor.
3105          * ndo_start_xmit will fill this descriptor the next time it's
3106          * called and stmmac_tx_clean may clean up to this descriptor.
3107          */
3108         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3109         tx_q->cur_tx = entry;
3110
3111         if (netif_msg_pktdata(priv)) {
3112                 void *tx_head;
3113
3114                 netdev_dbg(priv->dev,
3115                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3116                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3117                            entry, first, nfrags);
3118
3119                 if (priv->extend_desc)
3120                         tx_head = (void *)tx_q->dma_etx;
3121                 else
3122                         tx_head = (void *)tx_q->dma_tx;
3123
3124                 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3125
3126                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3127                 print_pkt(skb->data, skb->len);
3128         }
3129
3130         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3131                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3132                           __func__);
3133                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3134         }
3135
3136         dev->stats.tx_bytes += skb->len;
3137
3138         /* According to the coalesce parameter the IC bit for the latest
3139          * segment is cleared and the timer is re-armed to clean the tx status.
3140          * This approach takes care of the fragments: desc is the first
3141          * element in case of no SG.
3142          */
3143         tx_q->tx_count_frames += nfrags + 1;
3144         if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3145             !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3146             (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3147             priv->hwts_tx_en)) {
3148                 stmmac_tx_timer_arm(priv, queue);
3149         } else {
3150                 tx_q->tx_count_frames = 0;
3151                 stmmac_set_tx_ic(priv, desc);
3152                 priv->xstats.tx_set_ic_bit++;
3153         }
3154
3155         skb_tx_timestamp(skb);
3156
3157         /* We are now ready to fill the first descriptor and set the OWN bit
3158          * without any problem, because all the other descriptors are already
3159          * ready to be passed to the DMA engine.
3160          */
3161         if (likely(!is_jumbo)) {
3162                 bool last_segment = (nfrags == 0);
3163
3164                 des = dma_map_single(priv->device, skb->data,
3165                                      nopaged_len, DMA_TO_DEVICE);
3166                 if (dma_mapping_error(priv->device, des))
3167                         goto dma_map_err;
3168
3169                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3170
3171                 stmmac_set_desc_addr(priv, first, des);
3172
3173                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3174                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3175
3176                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3177                              priv->hwts_tx_en)) {
3178                         /* declare that device is doing timestamping */
3179                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3180                         stmmac_enable_tx_timestamp(priv, first);
3181                 }
3182
3183                 /* Prepare the first descriptor setting the OWN bit too */
3184                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3185                                 csum_insertion, priv->mode, 1, last_segment,
3186                                 skb->len);
3187         } else {
3188                 stmmac_set_tx_owner(priv, first);
3189         }
3190
3191         /* The OWN bit must be the last thing set when preparing the
3192          * descriptor, and a barrier is needed to make sure that everything
3193          * is coherent before granting the descriptor to the DMA engine.
3194          */
3195         wmb();
3196
3197         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3198
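        /* Notify the DMA engine that new descriptors are available and update
         * the tail pointer for this queue.
         */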
3199         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3200
3201         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3202         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3203         stmmac_tx_timer_arm(priv, queue);
3204
3205         return NETDEV_TX_OK;
3206
3207 dma_map_err:
3208         netdev_err(priv->dev, "Tx DMA map failed\n");
3209         dev_kfree_skb(skb);
3210         priv->dev->stats.tx_dropped++;
3211         return NETDEV_TX_OK;
3212 }
3213
3214 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3215 {
3216         struct vlan_ethhdr *veth;
3217         __be16 vlan_proto;
3218         u16 vlanid;
3219
3220         veth = (struct vlan_ethhdr *)skb->data;
3221         vlan_proto = veth->h_vlan_proto;
3222
3223         if ((vlan_proto == htons(ETH_P_8021Q) &&
3224              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3225             (vlan_proto == htons(ETH_P_8021AD) &&
3226              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3227                 /* pop the vlan tag */
3228                 vlanid = ntohs(veth->h_vlan_TCI);
3229                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3230                 skb_pull(skb, VLAN_HLEN);
3231                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3232         }
3233 }
3234
3235
3236 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3237 {
3238         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3239                 return 0;
3240
3241         return 1;
3242 }
3243
3244 /**
3245  * stmmac_rx_refill - refill used skb preallocated buffers
3246  * @priv: driver private structure
3247  * @queue: RX queue index
3248  * Description: this reallocates the skbs for the reception process,
3249  * which is based on zero-copy.
3250  */
3251 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3252 {
3253         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3254         int dirty = stmmac_rx_dirty(priv, queue);
3255         unsigned int entry = rx_q->dirty_rx;
3256
3257         int bfsize = priv->dma_buf_sz;
3258
3259         while (dirty-- > 0) {
3260                 struct dma_desc *p;
3261
3262                 if (priv->extend_desc)
3263                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3264                 else
3265                         p = rx_q->dma_rx + entry;
3266
3267                 if (likely(!rx_q->rx_skbuff[entry])) {
3268                         struct sk_buff *skb;
3269
3270                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3271                         if (unlikely(!skb)) {
3272                                 /* so for a while no zero-copy! */
3273                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3274                                 if (unlikely(net_ratelimit()))
3275                                         dev_err(priv->device,
3276                                                 "fail to alloc skb entry %d\n",
3277                                                 entry);
3278                                 break;
3279                         }
3280
3281                         rx_q->rx_skbuff[entry] = skb;
3282                         rx_q->rx_skbuff_dma[entry] =
3283                             dma_map_single(priv->device, skb->data, bfsize,
3284                                            DMA_FROM_DEVICE);
3285                         if (dma_mapping_error(priv->device,
3286                                               rx_q->rx_skbuff_dma[entry])) {
3287                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3288                                 dev_kfree_skb(skb);
3289                                 break;
3290                         }
3291
3292                         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3293                         stmmac_refill_desc3(priv, rx_q, p);
3294
3295                         if (rx_q->rx_zeroc_thresh > 0)
3296                                 rx_q->rx_zeroc_thresh--;
3297
3298                         netif_dbg(priv, rx_status, priv->dev,
3299                                   "refill entry #%d\n", entry);
3300                 }
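                /* Ensure the (re)written buffer address is visible to the
                 * device before the OWN bit is handed back below.
                 */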
3301                 dma_wmb();
3302
3303                 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3304
3305                 dma_wmb();
3306
3307                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3308         }
3309         rx_q->dirty_rx = entry;
3310         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3311 }
3312
3313 /**
3314  * stmmac_rx - manage the receive process
3315  * @priv: driver private structure
3316  * @limit: napi budget
3317  * @queue: RX queue index.
3318  * Description: this is the function called by the napi poll method.
3319  * It gets all the frames inside the ring.
3320  */
3321 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3322 {
3323         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3324         struct stmmac_channel *ch = &priv->channel[queue];
3325         unsigned int next_entry = rx_q->cur_rx;
3326         int coe = priv->hw->rx_csum;
3327         unsigned int count = 0;
3328         bool xmac;
3329
3330         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3331
3332         if (netif_msg_rx_status(priv)) {
3333                 void *rx_head;
3334
3335                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3336                 if (priv->extend_desc)
3337                         rx_head = (void *)rx_q->dma_erx;
3338                 else
3339                         rx_head = (void *)rx_q->dma_rx;
3340
3341                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3342         }
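        /* Walk the RX ring until the NAPI budget is exhausted or a descriptor
         * still owned by the DMA is found.
         */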
3343         while (count < limit) {
3344                 int entry, status;
3345                 struct dma_desc *p;
3346                 struct dma_desc *np;
3347
3348                 entry = next_entry;
3349
3350                 if (priv->extend_desc)
3351                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3352                 else
3353                         p = rx_q->dma_rx + entry;
3354
3355                 /* read the status of the incoming frame */
3356                 status = stmmac_rx_status(priv, &priv->dev->stats,
3357                                 &priv->xstats, p);
3358                 /* check if managed by the DMA otherwise go ahead */
3359                 if (unlikely(status & dma_own))
3360                         break;
3361
3362                 count++;
3363
3364                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3365                 next_entry = rx_q->cur_rx;
3366
3367                 if (priv->extend_desc)
3368                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3369                 else
3370                         np = rx_q->dma_rx + next_entry;
3371
3372                 prefetch(np);
3373
3374                 if (priv->extend_desc)
3375                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3376                                         &priv->xstats, rx_q->dma_erx + entry);
3377                 if (unlikely(status == discard_frame)) {
3378                         priv->dev->stats.rx_errors++;
3379                         if (priv->hwts_rx_en && !priv->extend_desc) {
3380                                 /* DESC2 & DESC3 will be overwritten by device
3381                                  * with timestamp value, hence reinitialize
3382                                  * them in stmmac_rx_refill() function so that
3383                                  * device can reuse it.
3384                                  */
3385                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3386                                 rx_q->rx_skbuff[entry] = NULL;
3387                                 dma_unmap_single(priv->device,
3388                                                  rx_q->rx_skbuff_dma[entry],
3389                                                  priv->dma_buf_sz,
3390                                                  DMA_FROM_DEVICE);
3391                         }
3392                 } else {
3393                         struct sk_buff *skb;
3394                         int frame_len;
3395                         unsigned int des;
3396
3397                         stmmac_get_desc_addr(priv, p, &des);
3398                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3399
3400                         /*  If frame length is greater than skb buffer size
3401                          *  (preallocated during init) then the packet is
3402                          *  ignored
3403                          */
3404                         if (frame_len > priv->dma_buf_sz) {
3405                                 if (net_ratelimit())
3406                                         netdev_err(priv->dev,
3407                                                    "len %d larger than size (%d)\n",
3408                                                    frame_len, priv->dma_buf_sz);
3409                                 priv->dev->stats.rx_length_errors++;
3410                                 continue;
3411                         }
3412
3413                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3414                          * Type frames (LLC/LLC-SNAP)
3415                          *
3416                          * llc_snap is never checked in GMAC >= 4, so this ACS
3417                          * feature is always disabled and packets need to be
3418                          * stripped manually.
3419                          */
3420                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3421                             unlikely(status != llc_snap))
3422                                 frame_len -= ETH_FCS_LEN;
3423
3424                         if (netif_msg_rx_status(priv)) {
3425                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3426                                            p, entry, des);
3427                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3428                                            frame_len, status);
3429                         }
3430
3431                         /* Zero-copy is always used, for all sizes, in case of
3432                          * GMAC4 because the used descriptors always need to be
3433                          * refilled.
3434                          */
3435                         if (unlikely(!xmac &&
3436                                      ((frame_len < priv->rx_copybreak) ||
3437                                      stmmac_rx_threshold_count(rx_q)))) {
3438                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3439                                                                 frame_len);
3440                                 if (unlikely(!skb)) {
3441                                         if (net_ratelimit())
3442                                                 dev_warn(priv->device,
3443                                                          "packet dropped\n");
3444                                         priv->dev->stats.rx_dropped++;
3445                                         continue;
3446                                 }
3447
3448                                 dma_sync_single_for_cpu(priv->device,
3449                                                         rx_q->rx_skbuff_dma
3450                                                         [entry], frame_len,
3451                                                         DMA_FROM_DEVICE);
3452                                 skb_copy_to_linear_data(skb,
3453                                                         rx_q->
3454                                                         rx_skbuff[entry]->data,
3455                                                         frame_len);
3456
3457                                 skb_put(skb, frame_len);
3458                                 dma_sync_single_for_device(priv->device,
3459                                                            rx_q->rx_skbuff_dma
3460                                                            [entry], frame_len,
3461                                                            DMA_FROM_DEVICE);
3462                         } else {
3463                                 skb = rx_q->rx_skbuff[entry];
3464                                 if (unlikely(!skb)) {
3465                                         if (net_ratelimit())
3466                                                 netdev_err(priv->dev,
3467                                                            "%s: Inconsistent Rx chain\n",
3468                                                            priv->dev->name);
3469                                         priv->dev->stats.rx_dropped++;
3470                                         continue;
3471                                 }
3472                                 prefetch(skb->data - NET_IP_ALIGN);
3473                                 rx_q->rx_skbuff[entry] = NULL;
3474                                 rx_q->rx_zeroc_thresh++;
3475
3476                                 skb_put(skb, frame_len);
3477                                 dma_unmap_single(priv->device,
3478                                                  rx_q->rx_skbuff_dma[entry],
3479                                                  priv->dma_buf_sz,
3480                                                  DMA_FROM_DEVICE);
3481                         }
3482
3483                         if (netif_msg_pktdata(priv)) {
3484                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3485                                            frame_len);
3486                                 print_pkt(skb->data, frame_len);
3487                         }
3488
3489                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3490
3491                         stmmac_rx_vlan(priv->dev, skb);
3492
3493                         skb->protocol = eth_type_trans(skb, priv->dev);
3494
3495                         if (unlikely(!coe))
3496                                 skb_checksum_none_assert(skb);
3497                         else
3498                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3499
3500                         napi_gro_receive(&ch->napi, skb);
3501
3502                         priv->dev->stats.rx_packets++;
3503                         priv->dev->stats.rx_bytes += frame_len;
3504                 }
3505         }
3506
3507         stmmac_rx_refill(priv, queue);
3508
3509         priv->xstats.rx_pkt_n += count;
3510
3511         return count;
3512 }
3513
3514 /**
3515  *  stmmac_napi_poll - stmmac poll method (NAPI)
3516  *  @napi : pointer to the napi structure.
3517  *  @budget : maximum number of packets that the current CPU can receive from
3518  *            all interfaces.
3519  *  Description :
3520  *  To look at the incoming frames and clear the tx resources.
3521  */
3522 static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3523 {
3524         struct stmmac_channel *ch =
3525                 container_of(napi, struct stmmac_channel, napi);
3526         struct stmmac_priv *priv = ch->priv_data;
3527         int work_done, rx_done = 0, tx_done = 0;
3528         u32 chan = ch->index;
3529
3530         priv->xstats.napi_poll++;
3531
3532         if (ch->has_tx)
3533                 tx_done = stmmac_tx_clean(priv, budget, chan);
3534         if (ch->has_rx)
3535                 rx_done = stmmac_rx(priv, budget, chan);
3536
3537         work_done = max(rx_done, tx_done);
3538         work_done = min(work_done, budget);
3539
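        /* If the budget was not exhausted, complete NAPI and re-enable the
         * per-channel DMA interrupts. If new events were raised in the
         * meantime, reschedule NAPI and mask the interrupts again.
         */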
3540         if (work_done < budget && napi_complete_done(napi, work_done)) {
3541                 int stat;
3542
3543                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3544                 stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3545                                                    &priv->xstats, chan);
3546                 if (stat && napi_reschedule(napi))
3547                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
3548         }
3549
3550         return work_done;
3551 }
3552
3553 /**
3554  *  stmmac_tx_timeout
3555  *  @dev : Pointer to net device structure
3556  *  Description: this function is called when a packet transmission fails to
3557  *   complete within a reasonable time. The driver will mark the error in the
3558  *   netdev structure and arrange for the device to be reset to a sane state
3559  *   in order to transmit a new packet.
3560  */
3561 static void stmmac_tx_timeout(struct net_device *dev)
3562 {
3563         struct stmmac_priv *priv = netdev_priv(dev);
3564
3565         stmmac_global_err(priv);
3566 }
3567
3568 /**
3569  *  stmmac_set_rx_mode - entry point for multicast addressing
3570  *  @dev : pointer to the device structure
3571  *  Description:
3572  *  This function is a driver entry point which gets called by the kernel
3573  *  whenever multicast addresses must be enabled/disabled.
3574  *  Return value:
3575  *  void.
3576  */
3577 static void stmmac_set_rx_mode(struct net_device *dev)
3578 {
3579         struct stmmac_priv *priv = netdev_priv(dev);
3580
3581         stmmac_set_filter(priv, priv->hw, dev);
3582 }
3583
3584 /**
3585  *  stmmac_change_mtu - entry point to change MTU size for the device.
3586  *  @dev : device pointer.
3587  *  @new_mtu : the new MTU size for the device.
3588  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3589  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3590  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3591  *  Return value:
3592  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3593  *  file on failure.
3594  */
3595 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3596 {
3597         struct stmmac_priv *priv = netdev_priv(dev);
3598         int txfifosz = priv->plat->tx_fifo_size;
3599         const int mtu = new_mtu;
3600
3601         if (txfifosz == 0)
3602                 txfifosz = priv->dma_cap.tx_fifo_size;
3603
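        /* The TX FIFO is shared between all TX queues; the per-queue share
         * must be able to hold at least one MTU-sized frame.
         */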
3604         txfifosz /= priv->plat->tx_queues_to_use;
3605
3606         if (netif_running(dev)) {
3607                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3608                 return -EBUSY;
3609         }
3610
3611         new_mtu = STMMAC_ALIGN(new_mtu);
3612
3613         /* If condition true, FIFO is too small or MTU too large */
3614         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3615                 return -EINVAL;
3616
3617         dev->mtu = mtu;
3618
3619         netdev_update_features(dev);
3620
3621         return 0;
3622 }
3623
3624 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3625                                              netdev_features_t features)
3626 {
3627         struct stmmac_priv *priv = netdev_priv(dev);
3628
3629         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3630                 features &= ~NETIF_F_RXCSUM;
3631
3632         if (!priv->plat->tx_coe)
3633                 features &= ~NETIF_F_CSUM_MASK;
3634
3635         /* Some GMAC devices have buggy Jumbo frame support that requires
3636          * Tx COE to be disabled for oversized frames (due to limited buffer
3637          * sizes). In this case we disable the TX csum insertion in the TDES
3638          * and do not use SF.
3639          */
3640         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3641                 features &= ~NETIF_F_CSUM_MASK;
3642
3643         /* Disable tso if asked by ethtool */
3644         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3645                 if (features & NETIF_F_TSO)
3646                         priv->tso = true;
3647                 else
3648                         priv->tso = false;
3649         }
3650
3651         return features;
3652 }
3653
3654 static int stmmac_set_features(struct net_device *netdev,
3655                                netdev_features_t features)
3656 {
3657         struct stmmac_priv *priv = netdev_priv(netdev);
3658
3659         /* Keep the COE type if checksumming is supported */
3660         if (features & NETIF_F_RXCSUM)
3661                 priv->hw->rx_csum = priv->plat->rx_coe;
3662         else
3663                 priv->hw->rx_csum = 0;
3664         /* No check needed because rx_coe has been set earlier and it will be
3665          * fixed up if there is an issue.
3666          */
3667         stmmac_rx_ipc(priv, priv->hw);
3668
3669         return 0;
3670 }
3671
3672 /**
3673  *  stmmac_interrupt - main ISR
3674  *  @irq: interrupt number.
3675  *  @dev_id: to pass the net device pointer (must be valid).
3676  *  Description: this is the main driver interrupt service routine.
3677  *  It can call:
3678  *  o DMA service routine (to manage incoming frame reception and transmission
3679  *    status)
3680  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3681  *    interrupts.
3682  */
3683 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3684 {
3685         struct net_device *dev = (struct net_device *)dev_id;
3686         struct stmmac_priv *priv = netdev_priv(dev);
3687         u32 rx_cnt = priv->plat->rx_queues_to_use;
3688         u32 tx_cnt = priv->plat->tx_queues_to_use;
3689         u32 queues_count;
3690         u32 queue;
3691         bool xmac;
3692
3693         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3694         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3695
3696         if (priv->irq_wake)
3697                 pm_wakeup_event(priv->device, 0);
3698
3699         /* Check if adapter is up */
3700         if (test_bit(STMMAC_DOWN, &priv->state))
3701                 return IRQ_HANDLED;
3702         /* Check if a fatal error happened */
3703         if (stmmac_safety_feat_interrupt(priv))
3704                 return IRQ_HANDLED;
3705
3706         /* To handle GMAC own interrupts */
3707         if ((priv->plat->has_gmac) || xmac) {
3708                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3709
3710                 if (unlikely(status)) {
3711                         /* For LPI we need to save the tx status */
3712                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3713                                 priv->tx_path_in_lpi_mode = true;
3714                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3715                                 priv->tx_path_in_lpi_mode = false;
3716                 }
3717
3718                 for (queue = 0; queue < queues_count; queue++) {
3719                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
3720                                                             queue);
3721                 }
3722
3723                 /* PCS link status */
3724                 if (priv->hw->pcs) {
3725                         if (priv->xstats.pcs_link)
3726                                 netif_carrier_on(dev);
3727                         else
3728                                 netif_carrier_off(dev);
3729                 }
3730         }
3731
3732         /* To handle DMA interrupts */
3733         stmmac_dma_interrupt(priv);
3734
3735         return IRQ_HANDLED;
3736 }
3737
3738 #ifdef CONFIG_NET_POLL_CONTROLLER
3739 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3740  * to allow network I/O with interrupts disabled.
3741  */
3742 static void stmmac_poll_controller(struct net_device *dev)
3743 {
3744         disable_irq(dev->irq);
3745         stmmac_interrupt(dev->irq, dev);
3746         enable_irq(dev->irq);
3747 }
3748 #endif
3749
3750 /**
3751  *  stmmac_ioctl - Entry point for the Ioctl
3752  *  @dev: Device pointer.
3753  *  @rq: An IOCTL-specific structure that can contain a pointer to
3754  *  a proprietary structure used to pass information to the driver.
3755  *  @cmd: IOCTL command
3756  *  Description:
3757  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3758  */
3759 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3760 {
3761         int ret = -EOPNOTSUPP;
3762
3763         if (!netif_running(dev))
3764                 return -EINVAL;
3765
3766         switch (cmd) {
3767         case SIOCGMIIPHY:
3768         case SIOCGMIIREG:
3769         case SIOCSMIIREG:
3770                 if (!dev->phydev)
3771                         return -EINVAL;
3772                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3773                 break;
3774         case SIOCSHWTSTAMP:
3775                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3776                 break;
3777         default:
3778                 break;
3779         }
3780
3781         return ret;
3782 }
3783
3784 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3785                                     void *cb_priv)
3786 {
3787         struct stmmac_priv *priv = cb_priv;
3788         int ret = -EOPNOTSUPP;
3789
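        /* Quiesce all queues while the classifier setup is applied, then
         * re-enable them before returning.
         */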
3790         stmmac_disable_all_queues(priv);
3791
3792         switch (type) {
3793         case TC_SETUP_CLSU32:
3794                 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3795                         ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3796                 break;
3797         default:
3798                 break;
3799         }
3800
3801         stmmac_enable_all_queues(priv);
3802         return ret;
3803 }
3804
3805 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3806                                  struct tc_block_offload *f)
3807 {
3808         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3809                 return -EOPNOTSUPP;
3810
3811         switch (f->command) {
3812         case TC_BLOCK_BIND:
3813                 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3814                                 priv, priv, f->extack);
3815         case TC_BLOCK_UNBIND:
3816                 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3817                 return 0;
3818         default:
3819                 return -EOPNOTSUPP;
3820         }
3821 }
3822
3823 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3824                            void *type_data)
3825 {
3826         struct stmmac_priv *priv = netdev_priv(ndev);
3827
3828         switch (type) {
3829         case TC_SETUP_BLOCK:
3830                 return stmmac_setup_tc_block(priv, type_data);
3831         case TC_SETUP_QDISC_CBS:
3832                 return stmmac_tc_setup_cbs(priv, priv, type_data);
3833         default:
3834                 return -EOPNOTSUPP;
3835         }
3836 }
3837
3838 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
3839                                struct net_device *sb_dev,
3840                                select_queue_fallback_t fallback)
3841 {
3842         if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3843                 /*
3844                  * There is no way to determine the number of TSO-
3845                  * capable queues. Always use queue 0, because if TSO
3846                  * is supported then at least this one will be
3847                  * capable.
3848                  */
3849                 return 0;
3850         }
3851
3852         return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
3853 }
3854
3855 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3856 {
3857         struct stmmac_priv *priv = netdev_priv(ndev);
3858         int ret = 0;
3859
3860         ret = eth_mac_addr(ndev, addr);
3861         if (ret)
3862                 return ret;
3863
3864         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3865
3866         return ret;
3867 }
3868
3869 #ifdef CONFIG_DEBUG_FS
3870 static struct dentry *stmmac_fs_dir;
3871
3872 static void sysfs_display_ring(void *head, int size, int extend_desc,
3873                                struct seq_file *seq)
3874 {
3875         int i;
3876         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3877         struct dma_desc *p = (struct dma_desc *)head;
3878
3879         for (i = 0; i < size; i++) {
3880                 if (extend_desc) {
3881                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3882                                    i, (unsigned int)virt_to_phys(ep),
3883                                    le32_to_cpu(ep->basic.des0),
3884                                    le32_to_cpu(ep->basic.des1),
3885                                    le32_to_cpu(ep->basic.des2),
3886                                    le32_to_cpu(ep->basic.des3));
3887                         ep++;
3888                 } else {
3889                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3890                                    i, (unsigned int)virt_to_phys(p),
3891                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3892                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3893                         p++;
3894                 }
3895                 seq_printf(seq, "\n");
3896         }
3897 }
3898
3899 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3900 {
3901         struct net_device *dev = seq->private;
3902         struct stmmac_priv *priv = netdev_priv(dev);
3903         u32 rx_count = priv->plat->rx_queues_to_use;
3904         u32 tx_count = priv->plat->tx_queues_to_use;
3905         u32 queue;
3906
3907         if ((dev->flags & IFF_UP) == 0)
3908                 return 0;
3909
3910         for (queue = 0; queue < rx_count; queue++) {
3911                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3912
3913                 seq_printf(seq, "RX Queue %d:\n", queue);
3914
3915                 if (priv->extend_desc) {
3916                         seq_printf(seq, "Extended descriptor ring:\n");
3917                         sysfs_display_ring((void *)rx_q->dma_erx,
3918                                            DMA_RX_SIZE, 1, seq);
3919                 } else {
3920                         seq_printf(seq, "Descriptor ring:\n");
3921                         sysfs_display_ring((void *)rx_q->dma_rx,
3922                                            DMA_RX_SIZE, 0, seq);
3923                 }
3924         }
3925
3926         for (queue = 0; queue < tx_count; queue++) {
3927                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3928
3929                 seq_printf(seq, "TX Queue %d:\n", queue);
3930
3931                 if (priv->extend_desc) {
3932                         seq_printf(seq, "Extended descriptor ring:\n");
3933                         sysfs_display_ring((void *)tx_q->dma_etx,
3934                                            DMA_TX_SIZE, 1, seq);
3935                 } else {
3936                         seq_printf(seq, "Descriptor ring:\n");
3937                         sysfs_display_ring((void *)tx_q->dma_tx,
3938                                            DMA_TX_SIZE, 0, seq);
3939                 }
3940         }
3941
3942         return 0;
3943 }
3944
3945 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3946 {
3947         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3948 }
3949
3950 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3951
3952 static const struct file_operations stmmac_rings_status_fops = {
3953         .owner = THIS_MODULE,
3954         .open = stmmac_sysfs_ring_open,
3955         .read = seq_read,
3956         .llseek = seq_lseek,
3957         .release = single_release,
3958 };
3959
3960 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3961 {
3962         struct net_device *dev = seq->private;
3963         struct stmmac_priv *priv = netdev_priv(dev);
3964
3965         if (!priv->hw_cap_support) {
3966                 seq_printf(seq, "DMA HW features not supported\n");
3967                 return 0;
3968         }
3969
3970         seq_printf(seq, "==============================\n");
3971         seq_printf(seq, "\tDMA HW features\n");
3972         seq_printf(seq, "==============================\n");
3973
3974         seq_printf(seq, "\t10/100 Mbps: %s\n",
3975                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3976         seq_printf(seq, "\t1000 Mbps: %s\n",
3977                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3978         seq_printf(seq, "\tHalf duplex: %s\n",
3979                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3980         seq_printf(seq, "\tHash Filter: %s\n",
3981                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3982         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3983                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3984         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3985                    (priv->dma_cap.pcs) ? "Y" : "N");
3986         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3987                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3988         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3989                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3990         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3991                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3992         seq_printf(seq, "\tRMON module: %s\n",
3993                    (priv->dma_cap.rmon) ? "Y" : "N");
3994         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3995                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3996         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3997                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3998         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3999                    (priv->dma_cap.eee) ? "Y" : "N");
4000         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4001         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4002                    (priv->dma_cap.tx_coe) ? "Y" : "N");
4003         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4004                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4005                            (priv->dma_cap.rx_coe) ? "Y" : "N");
4006         } else {
4007                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4008                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4009                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4010                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4011         }
4012         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4013                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4014         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4015                    priv->dma_cap.number_rx_channel);
4016         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4017                    priv->dma_cap.number_tx_channel);
4018         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4019                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4020
4021         return 0;
4022 }
4023
4024 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4025 {
4026         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4027 }
4028
4029 static const struct file_operations stmmac_dma_cap_fops = {
4030         .owner = THIS_MODULE,
4031         .open = stmmac_sysfs_dma_cap_open,
4032         .read = seq_read,
4033         .llseek = seq_lseek,
4034         .release = single_release,
4035 };
4036
4037 static int stmmac_init_fs(struct net_device *dev)
4038 {
4039         struct stmmac_priv *priv = netdev_priv(dev);
4040
4041         /* Create per netdev entries */
4042         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4043
4044         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4045                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4046
4047                 return -ENOMEM;
4048         }
4049
4050         /* Entry to report DMA RX/TX rings */
4051         priv->dbgfs_rings_status =
4052                 debugfs_create_file("descriptors_status", 0444,
4053                                     priv->dbgfs_dir, dev,
4054                                     &stmmac_rings_status_fops);
4055
4056         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4057                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4058                 debugfs_remove_recursive(priv->dbgfs_dir);
4059
4060                 return -ENOMEM;
4061         }
4062
4063         /* Entry to report the DMA HW features */
4064         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4065                                                   priv->dbgfs_dir,
4066                                                   dev, &stmmac_dma_cap_fops);
4067
4068         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4069                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4070                 debugfs_remove_recursive(priv->dbgfs_dir);
4071
4072                 return -ENOMEM;
4073         }
4074
4075         return 0;
4076 }
4077
4078 static void stmmac_exit_fs(struct net_device *dev)
4079 {
4080         struct stmmac_priv *priv = netdev_priv(dev);
4081
4082         debugfs_remove_recursive(priv->dbgfs_dir);
4083 }
4084 #endif /* CONFIG_DEBUG_FS */
4085
4086 static const struct net_device_ops stmmac_netdev_ops = {
4087         .ndo_open = stmmac_open,
4088         .ndo_start_xmit = stmmac_xmit,
4089         .ndo_stop = stmmac_release,
4090         .ndo_change_mtu = stmmac_change_mtu,
4091         .ndo_fix_features = stmmac_fix_features,
4092         .ndo_set_features = stmmac_set_features,
4093         .ndo_set_rx_mode = stmmac_set_rx_mode,
4094         .ndo_tx_timeout = stmmac_tx_timeout,
4095         .ndo_do_ioctl = stmmac_ioctl,
4096         .ndo_setup_tc = stmmac_setup_tc,
4097         .ndo_select_queue = stmmac_select_queue,
4098 #ifdef CONFIG_NET_POLL_CONTROLLER
4099         .ndo_poll_controller = stmmac_poll_controller,
4100 #endif
4101         .ndo_set_mac_address = stmmac_set_mac_address,
4102 };
4103
4104 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4105 {
4106         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4107                 return;
4108         if (test_bit(STMMAC_DOWN, &priv->state))
4109                 return;
4110
4111         netdev_err(priv->dev, "Reset adapter.\n");
4112
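        /* Recover by fully closing and re-opening the interface under the
         * rtnl lock, serialised against concurrent resets via STMMAC_RESETING.
         */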
4113         rtnl_lock();
4114         netif_trans_update(priv->dev);
4115         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4116                 usleep_range(1000, 2000);
4117
4118         set_bit(STMMAC_DOWN, &priv->state);
4119         dev_close(priv->dev);
4120         dev_open(priv->dev);
4121         clear_bit(STMMAC_DOWN, &priv->state);
4122         clear_bit(STMMAC_RESETING, &priv->state);
4123         rtnl_unlock();
4124 }
4125
4126 static void stmmac_service_task(struct work_struct *work)
4127 {
4128         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4129                         service_task);
4130
4131         stmmac_reset_subtask(priv);
4132         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4133 }
4134
4135 /**
4136  *  stmmac_hw_init - Init the MAC device
4137  *  @priv: driver private structure
4138  *  Description: this function is to configure the MAC device according to
4139  *  some platform parameters or the HW capability register. It prepares the
4140  *  driver to use either ring or chain modes and to setup either enhanced or
4141  *  normal descriptors.
4142  */
4143 static int stmmac_hw_init(struct stmmac_priv *priv)
4144 {
4145         int ret;
4146
4147         /* dwmac-sun8i only works in chain mode */
4148         if (priv->plat->has_sun8i)
4149                 chain_mode = 1;
4150         priv->chain_mode = chain_mode;
4151
4152         /* Initialize HW Interface */
4153         ret = stmmac_hwif_init(priv);
4154         if (ret)
4155                 return ret;
4156
4157         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4158         priv->hw_cap_support = stmmac_get_hw_features(priv);
4159         if (priv->hw_cap_support) {
4160                 dev_info(priv->device, "DMA HW capability register supported\n");
4161
4162                 /* We can override some gmac/dma configuration fields (e.g.
4163                  * enh_desc, tx_coe) that are passed through the
4164                  * platform with the values from the HW capability
4165                  * register (if supported).
4166                  */
4167                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4168                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4169                 priv->hw->pmt = priv->plat->pmt;
4170
4171                 /* TXCOE doesn't work in thresh DMA mode */
4172                 if (priv->plat->force_thresh_dma_mode)
4173                         priv->plat->tx_coe = 0;
4174                 else
4175                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4176
4177                 /* In case of GMAC4, rx_coe comes from the HW cap register. */
4178                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4179
4180                 if (priv->dma_cap.rx_coe_type2)
4181                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4182                 else if (priv->dma_cap.rx_coe_type1)
4183                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4184
4185         } else {
4186                 dev_info(priv->device, "No HW DMA feature register supported\n");
4187         }
4188
4189         if (priv->plat->rx_coe) {
4190                 priv->hw->rx_csum = priv->plat->rx_coe;
4191                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4192                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4193                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4194         }
4195         if (priv->plat->tx_coe)
4196                 dev_info(priv->device, "TX Checksum insertion supported\n");
4197
4198         if (priv->plat->pmt) {
4199                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4200                 device_set_wakeup_capable(priv->device, 1);
4201         }
4202
4203         if (priv->dma_cap.tsoen)
4204                 dev_info(priv->device, "TSO supported\n");
4205
4206         /* Run HW quirks, if any */
4207         if (priv->hwif_quirks) {
4208                 ret = priv->hwif_quirks(priv);
4209                 if (ret)
4210                         return ret;
4211         }
4212
4213         /* Rx Watchdog is available in cores newer than 3.40.
4214          * In some cases, for example on buggy HW, this feature
4215          * has to be disabled and this can be done by passing the
4216          * riwt_off field from the platform.
4217          */
4218         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4219             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4220                 priv->use_riwt = 1;
4221                 dev_info(priv->device,
4222                          "Enable RX Mitigation via HW Watchdog Timer\n");
4223         }
4224
4225         return 0;
4226 }
4227
4228 /**
4229  * stmmac_dvr_probe
4230  * @device: device pointer
4231  * @plat_dat: platform data pointer
4232  * @res: stmmac resource pointer
4233  * Description: this is the main probe function used to
4234  * call alloc_etherdev and allocate the priv structure.
4235  * Return:
4236  * returns 0 on success, otherwise errno.
4237  */
4238 int stmmac_dvr_probe(struct device *device,
4239                      struct plat_stmmacenet_data *plat_dat,
4240                      struct stmmac_resources *res)
4241 {
4242         struct net_device *ndev = NULL;
4243         struct stmmac_priv *priv;
4244         u32 queue, maxq;
4245         int ret = 0;
4246
4247         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4248                                   MTL_MAX_TX_QUEUES,
4249                                   MTL_MAX_RX_QUEUES);
4250         if (!ndev)
4251                 return -ENOMEM;
4252
4253         SET_NETDEV_DEV(ndev, device);
4254
4255         priv = netdev_priv(ndev);
4256         priv->device = device;
4257         priv->dev = ndev;
4258
4259         stmmac_set_ethtool_ops(ndev);
4260         priv->pause = pause;
4261         priv->plat = plat_dat;
4262         priv->ioaddr = res->addr;
4263         priv->dev->base_addr = (unsigned long)res->addr;
4264
4265         priv->dev->irq = res->irq;
4266         priv->wol_irq = res->wol_irq;
4267         priv->lpi_irq = res->lpi_irq;
4268
4269         if (res->mac)
4270                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4271
4272         dev_set_drvdata(device, priv->dev);
4273
4274         /* Verify driver arguments */
4275         stmmac_verify_args();
4276
4277         /* Allocate workqueue */
4278         priv->wq = create_singlethread_workqueue("stmmac_wq");
4279         if (!priv->wq) {
4280                 dev_err(priv->device, "failed to create workqueue\n");
4281                 ret = -ENOMEM;
4282                 goto error_wq;
4283         }
4284
4285         INIT_WORK(&priv->service_task, stmmac_service_task);
4286
4287         /* Override with kernel parameters if supplied XXX CRS XXX
4288          * this needs to have multiple instances
4289          */
4290         if ((phyaddr >= 0) && (phyaddr <= 31))
4291                 priv->plat->phy_addr = phyaddr;
4292
4293         if (priv->plat->stmmac_rst) {
4294                 ret = reset_control_assert(priv->plat->stmmac_rst);
4295                 reset_control_deassert(priv->plat->stmmac_rst);
4296                 /* Some reset controllers have only a reset callback instead of
4297                  * an assert + deassert callback pair.
4298                  */
4299                 if (ret == -ENOTSUPP)
4300                         reset_control_reset(priv->plat->stmmac_rst);
4301         }
4302
4303         /* Init MAC and get the capabilities */
4304         ret = stmmac_hw_init(priv);
4305         if (ret)
4306                 goto error_hw_init;
4307
4308         stmmac_check_ether_addr(priv);
4309
4310         /* Configure real RX and TX queues */
4311         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4312         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4313
4314         ndev->netdev_ops = &stmmac_netdev_ops;
4315
4316         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4317                             NETIF_F_RXCSUM;
4318
4319         ret = stmmac_tc_init(priv, priv);
4320         if (!ret) {
4321                 ndev->hw_features |= NETIF_F_HW_TC;
4322         }
4323
4324         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4325                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4326                 priv->tso = true;
4327                 dev_info(priv->device, "TSO feature enabled\n");
4328         }
4329         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4330         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4331 #ifdef STMMAC_VLAN_TAG_USED
4332         /* Both mac100 and gmac support receive VLAN tag detection */
4333         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4334 #endif
4335         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4336
4337         /* MTU range: 46 - hw-specific max */
4338         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4339         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4340                 ndev->max_mtu = JUMBO_LEN;
4341         else if (priv->plat->has_xgmac)
4342                 ndev->max_mtu = XGMAC_JUMBO_LEN;
4343         else
4344                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4345         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4346          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4347          */
4348         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4349             (priv->plat->maxmtu >= ndev->min_mtu))
4350                 ndev->max_mtu = priv->plat->maxmtu;
4351         else if (priv->plat->maxmtu < ndev->min_mtu)
4352                 dev_warn(priv->device,
4353                          "%s: warning: maxmtu has an invalid value (%d)\n",
4354                          __func__, priv->plat->maxmtu);
4355
4356         if (flow_ctrl)
4357                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4358
4359         /* Setup channels NAPI */
4360         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4361
4362         for (queue = 0; queue < maxq; queue++) {
4363                 struct stmmac_channel *ch = &priv->channel[queue];
4364
4365                 ch->priv_data = priv;
4366                 ch->index = queue;
4367
4368                 if (queue < priv->plat->rx_queues_to_use)
4369                         ch->has_rx = true;
4370                 if (queue < priv->plat->tx_queues_to_use)
4371                         ch->has_tx = true;
4372
4373                 netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4374                                NAPI_POLL_WEIGHT);
4375         }
4376
4377         mutex_init(&priv->lock);
4378
4379         /* If a specific clk_csr value is passed from the platform
4380          * this means that the CSR Clock Range selection cannot be
4381          * changed at run-time and it is fixed. Otherwise the driver will try to
4382          * set the MDC clock dynamically according to the actual csr
4383          * clock input.
4384          */
4385         if (!priv->plat->clk_csr)
4386                 stmmac_clk_csr_set(priv);
4387         else
4388                 priv->clk_csr = priv->plat->clk_csr;
4389
4390         stmmac_check_pcs_mode(priv);
4391
4392         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4393             priv->hw->pcs != STMMAC_PCS_TBI &&
4394             priv->hw->pcs != STMMAC_PCS_RTBI) {
4395                 /* MDIO bus Registration */
4396                 ret = stmmac_mdio_register(ndev);
4397                 if (ret < 0) {
4398                         dev_err(priv->device,
4399                                 "%s: MDIO bus (id: %d) registration failed",
4400                                 __func__, priv->plat->bus_id);
4401                         goto error_mdio_register;
4402                 }
4403         }
4404
4405         ret = register_netdev(ndev);
4406         if (ret) {
4407                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4408                         __func__, ret);
4409                 goto error_netdev_register;
4410         }
4411
4412 #ifdef CONFIG_DEBUG_FS
4413         ret = stmmac_init_fs(ndev);
4414         if (ret < 0)
4415                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4416                             __func__);
4417 #endif
4418
4419         return ret;
4420
4421 error_netdev_register:
4422         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4423             priv->hw->pcs != STMMAC_PCS_TBI &&
4424             priv->hw->pcs != STMMAC_PCS_RTBI)
4425                 stmmac_mdio_unregister(ndev);
4426 error_mdio_register:
4427         for (queue = 0; queue < maxq; queue++) {
4428                 struct stmmac_channel *ch = &priv->channel[queue];
4429
4430                 netif_napi_del(&ch->napi);
4431         }
4432 error_hw_init:
4433         destroy_workqueue(priv->wq);
4434 error_wq:
4435         free_netdev(ndev);
4436
4437         return ret;
4438 }
4439 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
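/*
 * Usage sketch: stmmac_dvr_probe() is normally called from a platform glue
 * driver.  A minimal probe path, assuming the stmmac platform helpers
 * stmmac_get_platform_resources() and stmmac_probe_config_dt() from
 * stmmac_platform.c are available and pdev is the glue driver's
 * platform_device, could look like:
 *
 *	struct plat_stmmacenet_data *plat_dat;
 *	struct stmmac_resources stmmac_res;
 *	int ret;
 *
 *	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *	if (ret)
 *		return ret;
 *
 *	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *	if (IS_ERR(plat_dat))
 *		return PTR_ERR(plat_dat);
 *
 *	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 */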
4440
4441 /**
4442  * stmmac_dvr_remove
4443  * @dev: device pointer
4444  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4445  * changes the link status, and releases the DMA descriptor rings.
4446  */
4447 int stmmac_dvr_remove(struct device *dev)
4448 {
4449         struct net_device *ndev = dev_get_drvdata(dev);
4450         struct stmmac_priv *priv = netdev_priv(ndev);
4451
4452         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4453
4454 #ifdef CONFIG_DEBUG_FS
4455         stmmac_exit_fs(ndev);
4456 #endif
4457         stmmac_stop_all_dma(priv);
4458
4459         stmmac_mac_set(priv, priv->ioaddr, false);
4460         netif_carrier_off(ndev);
4461         unregister_netdev(ndev);
4462         if (priv->plat->stmmac_rst)
4463                 reset_control_assert(priv->plat->stmmac_rst);
4464         clk_disable_unprepare(priv->plat->pclk);
4465         clk_disable_unprepare(priv->plat->stmmac_clk);
4466         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4467             priv->hw->pcs != STMMAC_PCS_TBI &&
4468             priv->hw->pcs != STMMAC_PCS_RTBI)
4469                 stmmac_mdio_unregister(ndev);
4470         destroy_workqueue(priv->wq);
4471         mutex_destroy(&priv->lock);
4472         free_netdev(ndev);
4473
4474         return 0;
4475 }
4476 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4477
4478 /**
4479  * stmmac_suspend - suspend callback
4480  * @dev: device pointer
4481  * Description: this is the function to suspend the device and it is called
4482  * by the platform driver to stop the network queue, release the resources,
4483  * program the PMT register (for WoL), and clean and release driver resources.
4484  */
4485 int stmmac_suspend(struct device *dev)
4486 {
4487         struct net_device *ndev = dev_get_drvdata(dev);
4488         struct stmmac_priv *priv = netdev_priv(ndev);
4489         u32 chan;
4490
4491         if (!ndev || !netif_running(ndev))
4492                 return 0;
4493
4494         if (ndev->phydev)
4495                 phy_stop(ndev->phydev);
4496
4497         mutex_lock(&priv->lock);
4498
4499         netif_device_detach(ndev);
4500
4501         stmmac_disable_all_queues(priv);
4502
4503         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4504                 del_timer_sync(&priv->tx_queue[chan].txtimer);
4505
4506         if (priv->eee_enabled) {
4507                 priv->tx_path_in_lpi_mode = false;
4508                 del_timer_sync(&priv->eee_ctrl_timer);
4509         }
4510
4511         /* Stop TX/RX DMA */
4512         stmmac_stop_all_dma(priv);
4513
4514         /* Enable Power down mode by programming the PMT regs */
4515         if (device_may_wakeup(priv->device)) {
4516                 stmmac_pmt(priv, priv->hw, priv->wolopts);
4517                 priv->irq_wake = 1;
4518         } else {
4519                 stmmac_mac_set(priv, priv->ioaddr, false);
4520                 pinctrl_pm_select_sleep_state(priv->device);
4521                 /* Disable clocks in case PWM is off */
4522                 if (priv->plat->clk_ptp_ref)
4523                         clk_disable_unprepare(priv->plat->clk_ptp_ref);
4524                 clk_disable_unprepare(priv->plat->pclk);
4525                 clk_disable_unprepare(priv->plat->stmmac_clk);
4526         }
4527         mutex_unlock(&priv->lock);
4528
4529         priv->oldlink = false;
4530         priv->speed = SPEED_UNKNOWN;
4531         priv->oldduplex = DUPLEX_UNKNOWN;
4532         return 0;
4533 }
4534 EXPORT_SYMBOL_GPL(stmmac_suspend);
4535
4536 /**
4537  * stmmac_reset_queues_param - reset queue parameters
4538  * @priv: driver private structure
4539  */
4540 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4541 {
4542         u32 rx_cnt = priv->plat->rx_queues_to_use;
4543         u32 tx_cnt = priv->plat->tx_queues_to_use;
4544         u32 queue;
4545
4546         for (queue = 0; queue < rx_cnt; queue++) {
4547                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4548
4549                 rx_q->cur_rx = 0;
4550                 rx_q->dirty_rx = 0;
4551         }
4552
4553         for (queue = 0; queue < tx_cnt; queue++) {
4554                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4555
4556                 tx_q->cur_tx = 0;
4557                 tx_q->dirty_tx = 0;
4558                 tx_q->mss = 0;
4559
4560                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
4561         }
4562 }
4563
4564 /**
4565  * stmmac_resume - resume callback
4566  * @dev: device pointer
4567  * Description: when resuming, this function is invoked to set up the DMA and CORE
4568  * in a usable state.
4569  */
4570 int stmmac_resume(struct device *dev)
4571 {
4572         struct net_device *ndev = dev_get_drvdata(dev);
4573         struct stmmac_priv *priv = netdev_priv(ndev);
4574
4575         if (!netif_running(ndev))
4576                 return 0;
4577
4578         /* The Power Down bit in the PM register is cleared
4579          * automatically as soon as a magic packet or a Wake-up frame
4580          * is received. Anyway, it's better to manually clear
4581          * this bit because it can generate problems while resuming
4582          * from other devices (e.g. the serial console).
4583          */
4584         if (device_may_wakeup(priv->device)) {
4585                 mutex_lock(&priv->lock);
4586                 stmmac_pmt(priv, priv->hw, 0);
4587                 mutex_unlock(&priv->lock);
4588                 priv->irq_wake = 0;
4589         } else {
4590                 pinctrl_pm_select_default_state(priv->device);
4591                 /* enable the clocks previously disabled */
4592                 clk_prepare_enable(priv->plat->stmmac_clk);
4593                 clk_prepare_enable(priv->plat->pclk);
4594                 if (priv->plat->clk_ptp_ref)
4595                         clk_prepare_enable(priv->plat->clk_ptp_ref);
4596                 /* reset the phy so that it's ready */
4597                 if (priv->mii)
4598                         stmmac_mdio_reset(priv->mii);
4599         }
4600
4601         netif_device_attach(ndev);
4602
4603         mutex_lock(&priv->lock);
4604
4605         stmmac_reset_queues_param(priv);
4606
4607         stmmac_free_tx_skbufs(priv);
4608         stmmac_clear_descriptors(priv);
4609
4610         stmmac_hw_setup(ndev, false);
4611         stmmac_init_tx_coalesce(priv);
4612         stmmac_set_rx_mode(ndev);
4613
4614         stmmac_enable_all_queues(priv);
4615
4616         mutex_unlock(&priv->lock);
4617
4618         if (ndev->phydev)
4619                 phy_start(ndev->phydev);
4620
4621         return 0;
4622 }
4623 EXPORT_SYMBOL_GPL(stmmac_resume);
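/*
 * Usage sketch: glue drivers typically wrap stmmac_suspend()/stmmac_resume()
 * in their own callbacks and expose them through a dev_pm_ops structure.
 * A minimal variant that uses them directly (my_stmmac_pm_ops is a
 * hypothetical name) could be:
 *
 *	static SIMPLE_DEV_PM_OPS(my_stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 *
 * with the platform_driver's .driver.pm field pointing at &my_stmmac_pm_ops.
 * Both callbacks expect dev_get_drvdata() to return the net_device, as set
 * up by stmmac_dvr_probe().
 */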
4624
4625 #ifndef MODULE
4626 static int __init stmmac_cmdline_opt(char *str)
4627 {
4628         char *opt;
4629
4630         if (!str || !*str)
4631                 return 1;
4632         while ((opt = strsep(&str, ",")) != NULL) {
4633                 if (!strncmp(opt, "debug:", 6)) {
4634                         if (kstrtoint(opt + 6, 0, &debug))
4635                                 goto err;
4636                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4637                         if (kstrtoint(opt + 8, 0, &phyaddr))
4638                                 goto err;
4639                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4640                         if (kstrtoint(opt + 7, 0, &buf_sz))
4641                                 goto err;
4642                 } else if (!strncmp(opt, "tc:", 3)) {
4643                         if (kstrtoint(opt + 3, 0, &tc))
4644                                 goto err;
4645                 } else if (!strncmp(opt, "watchdog:", 9)) {
4646                         if (kstrtoint(opt + 9, 0, &watchdog))
4647                                 goto err;
4648                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4649                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4650                                 goto err;
4651                 } else if (!strncmp(opt, "pause:", 6)) {
4652                         if (kstrtoint(opt + 6, 0, &pause))
4653                                 goto err;
4654                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4655                         if (kstrtoint(opt + 10, 0, &eee_timer))
4656                                 goto err;
4657                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4658                         if (kstrtoint(opt + 11, 0, &chain_mode))
4659                                 goto err;
4660                 }
4661         }
4662         return 1;
4663
4664 err:
4665         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4666         return 1;
4667 }
4668
4669 __setup("stmmaceth=", stmmac_cmdline_opt);
4670 #endif /* MODULE */
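/*
 * Usage sketch: when the driver is built in, the parser above accepts the
 * same options on the kernel command line, e.g. (hypothetical values):
 *
 *	stmmaceth=debug:16,phyaddr:1,buf_sz:2048,watchdog:5000
 */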
4671
4672 static int __init stmmac_init(void)
4673 {
4674 #ifdef CONFIG_DEBUG_FS
4675         /* Create debugfs main directory if it doesn't exist yet */
4676         if (!stmmac_fs_dir) {
4677                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4678
4679                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4680                         pr_err("ERROR %s, debugfs create directory failed\n",
4681                                STMMAC_RESOURCE_NAME);
4682
4683                         return -ENOMEM;
4684                 }
4685         }
4686 #endif
4687
4688         return 0;
4689 }
4690
4691 static void __exit stmmac_exit(void)
4692 {
4693 #ifdef CONFIG_DEBUG_FS
4694         debugfs_remove_recursive(stmmac_fs_dir);
4695 #endif
4696 }
4697
4698 module_init(stmmac_init)
4699 module_exit(stmmac_exit)
4700
4701 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4702 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4703 MODULE_LICENSE("GPL");