GNU Linux-libre 5.10.215-gnu1
[releases.git] / drivers / net / ethernet / stmicro / stmmac / stmmac_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <net/pkt_cls.h>
42 #include "stmmac_ptp.h"
43 #include "stmmac.h"
44 #include <linux/reset.h>
45 #include <linux/of_mdio.h>
46 #include "dwmac1000.h"
47 #include "dwxgmac2.h"
48 #include "hwif.h"
49
50 /* As long as the interface is active, we keep the timestamping counter enabled
51  * with fine resolution and binary rollover. This avoid non-monotonic behavior
52  * (clock jumps) when changing timestamping settings at runtime.
53  */
54 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
55                                  PTP_TCR_TSCTRLSSR)
56
57 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
58 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
59
60 /* Module parameters */
61 #define TX_TIMEO        5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73
74 #define STMMAC_TX_THRESH(x)     ((x)->dma_tx_size / 4)
75 #define STMMAC_RX_THRESH(x)     ((x)->dma_rx_size / 4)
76
77 static int flow_ctrl = FLOW_AUTO;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89
90 #define DEFAULT_BUFSIZE 1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
95 #define STMMAC_RX_COPYBREAK     256
96
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
99                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
101 #define STMMAC_DEFAULT_LPI_TIMER        1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
106
107 /* By default the driver will use the ring mode to manage tx and rx descriptors,
108  * but allow user to force to use the chain instead of the ring
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115
116 #ifdef CONFIG_DEBUG_FS
117 static const struct net_device_ops stmmac_netdev_ops;
118 static void stmmac_init_fs(struct net_device *dev);
119 static void stmmac_exit_fs(struct net_device *dev);
120 #endif
121
122 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
123
124 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
125 {
126         int ret = 0;
127
128         if (enabled) {
129                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
130                 if (ret)
131                         return ret;
132                 ret = clk_prepare_enable(priv->plat->pclk);
133                 if (ret) {
134                         clk_disable_unprepare(priv->plat->stmmac_clk);
135                         return ret;
136                 }
137         } else {
138                 clk_disable_unprepare(priv->plat->stmmac_clk);
139                 clk_disable_unprepare(priv->plat->pclk);
140         }
141
142         return ret;
143 }
144 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
145
146 /**
147  * stmmac_verify_args - verify the driver parameters.
148  * Description: it checks the driver parameters and set a default in case of
149  * errors.
150  */
151 static void stmmac_verify_args(void)
152 {
153         if (unlikely(watchdog < 0))
154                 watchdog = TX_TIMEO;
155         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
156                 buf_sz = DEFAULT_BUFSIZE;
157         if (unlikely(flow_ctrl > 1))
158                 flow_ctrl = FLOW_AUTO;
159         else if (likely(flow_ctrl < 0))
160                 flow_ctrl = FLOW_OFF;
161         if (unlikely((pause < 0) || (pause > 0xffff)))
162                 pause = PAUSE_TIME;
163         if (eee_timer < 0)
164                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
165 }
166
167 /**
168  * stmmac_disable_all_queues - Disable all queues
169  * @priv: driver private structure
170  */
171 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
172 {
173         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
174         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
175         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
176         u32 queue;
177
178         for (queue = 0; queue < maxq; queue++) {
179                 struct stmmac_channel *ch = &priv->channel[queue];
180
181                 if (queue < rx_queues_cnt)
182                         napi_disable(&ch->rx_napi);
183                 if (queue < tx_queues_cnt)
184                         napi_disable(&ch->tx_napi);
185         }
186 }
187
188 /**
189  * stmmac_enable_all_queues - Enable all queues
190  * @priv: driver private structure
191  */
192 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
193 {
194         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
195         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
196         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
197         u32 queue;
198
199         for (queue = 0; queue < maxq; queue++) {
200                 struct stmmac_channel *ch = &priv->channel[queue];
201
202                 if (queue < rx_queues_cnt)
203                         napi_enable(&ch->rx_napi);
204                 if (queue < tx_queues_cnt)
205                         napi_enable(&ch->tx_napi);
206         }
207 }
208
209 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
210 {
211         if (!test_bit(STMMAC_DOWN, &priv->state) &&
212             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
213                 queue_work(priv->wq, &priv->service_task);
214 }
215
216 static void stmmac_global_err(struct stmmac_priv *priv)
217 {
218         netif_carrier_off(priv->dev);
219         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
220         stmmac_service_event_schedule(priv);
221 }
222
223 /**
224  * stmmac_clk_csr_set - dynamically set the MDC clock
225  * @priv: driver private structure
226  * Description: this is to dynamically set the MDC clock according to the csr
227  * clock input.
228  * Note:
229  *      If a specific clk_csr value is passed from the platform
230  *      this means that the CSR Clock Range selection cannot be
231  *      changed at run-time and it is fixed (as reported in the driver
232  *      documentation). Viceversa the driver will try to set the MDC
233  *      clock dynamically according to the actual clock input.
234  */
235 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
236 {
237         u32 clk_rate;
238
239         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
240
241         /* Platform provided default clk_csr would be assumed valid
242          * for all other cases except for the below mentioned ones.
243          * For values higher than the IEEE 802.3 specified frequency
244          * we can not estimate the proper divider as it is not known
245          * the frequency of clk_csr_i. So we do not change the default
246          * divider.
247          */
248         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
249                 if (clk_rate < CSR_F_35M)
250                         priv->clk_csr = STMMAC_CSR_20_35M;
251                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
252                         priv->clk_csr = STMMAC_CSR_35_60M;
253                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
254                         priv->clk_csr = STMMAC_CSR_60_100M;
255                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
256                         priv->clk_csr = STMMAC_CSR_100_150M;
257                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
258                         priv->clk_csr = STMMAC_CSR_150_250M;
259                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
260                         priv->clk_csr = STMMAC_CSR_250_300M;
261         }
262
263         if (priv->plat->has_sun8i) {
264                 if (clk_rate > 160000000)
265                         priv->clk_csr = 0x03;
266                 else if (clk_rate > 80000000)
267                         priv->clk_csr = 0x02;
268                 else if (clk_rate > 40000000)
269                         priv->clk_csr = 0x01;
270                 else
271                         priv->clk_csr = 0;
272         }
273
274         if (priv->plat->has_xgmac) {
275                 if (clk_rate > 400000000)
276                         priv->clk_csr = 0x5;
277                 else if (clk_rate > 350000000)
278                         priv->clk_csr = 0x4;
279                 else if (clk_rate > 300000000)
280                         priv->clk_csr = 0x3;
281                 else if (clk_rate > 250000000)
282                         priv->clk_csr = 0x2;
283                 else if (clk_rate > 150000000)
284                         priv->clk_csr = 0x1;
285                 else
286                         priv->clk_csr = 0x0;
287         }
288 }
289
290 static void print_pkt(unsigned char *buf, int len)
291 {
292         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
293         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
294 }
295
296 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
297 {
298         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
299         u32 avail;
300
301         if (tx_q->dirty_tx > tx_q->cur_tx)
302                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
303         else
304                 avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
305
306         return avail;
307 }
308
309 /**
310  * stmmac_rx_dirty - Get RX queue dirty
311  * @priv: driver private structure
312  * @queue: RX queue index
313  */
314 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
315 {
316         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
317         u32 dirty;
318
319         if (rx_q->dirty_rx <= rx_q->cur_rx)
320                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
321         else
322                 dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
323
324         return dirty;
325 }
326
327 /**
328  * stmmac_enable_eee_mode - check and enter in LPI mode
329  * @priv: driver private structure
330  * Description: this function is to verify and enter in LPI mode in case of
331  * EEE.
332  */
333 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
334 {
335         u32 tx_cnt = priv->plat->tx_queues_to_use;
336         u32 queue;
337
338         /* check if all TX queues have the work finished */
339         for (queue = 0; queue < tx_cnt; queue++) {
340                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
341
342                 if (tx_q->dirty_tx != tx_q->cur_tx)
343                         return; /* still unfinished work */
344         }
345
346         /* Check and enter in LPI mode */
347         if (!priv->tx_path_in_lpi_mode)
348                 stmmac_set_eee_mode(priv, priv->hw,
349                                 priv->plat->en_tx_lpi_clockgating);
350 }
351
352 /**
353  * stmmac_disable_eee_mode - disable and exit from LPI mode
354  * @priv: driver private structure
355  * Description: this function is to exit and disable EEE in case of
356  * LPI state is true. This is called by the xmit.
357  */
358 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
359 {
360         stmmac_reset_eee_mode(priv, priv->hw);
361         del_timer_sync(&priv->eee_ctrl_timer);
362         priv->tx_path_in_lpi_mode = false;
363 }
364
365 /**
366  * stmmac_eee_ctrl_timer - EEE TX SW timer.
367  * @t:  timer_list struct containing private info
368  * Description:
369  *  if there is no data transfer and if we are not in LPI state,
370  *  then MAC Transmitter can be moved to LPI state.
371  */
372 static void stmmac_eee_ctrl_timer(struct timer_list *t)
373 {
374         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
375
376         stmmac_enable_eee_mode(priv);
377         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
378 }
379
380 /**
381  * stmmac_eee_init - init EEE
382  * @priv: driver private structure
383  * Description:
384  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
385  *  can also manage EEE, this function enable the LPI state and start related
386  *  timer.
387  */
388 bool stmmac_eee_init(struct stmmac_priv *priv)
389 {
390         int eee_tw_timer = priv->eee_tw_timer;
391
392         /* Using PCS we cannot dial with the phy registers at this stage
393          * so we do not support extra feature like EEE.
394          */
395         if (priv->hw->pcs == STMMAC_PCS_TBI ||
396             priv->hw->pcs == STMMAC_PCS_RTBI)
397                 return false;
398
399         /* Check if MAC core supports the EEE feature. */
400         if (!priv->dma_cap.eee)
401                 return false;
402
403         mutex_lock(&priv->lock);
404
405         /* Check if it needs to be deactivated */
406         if (!priv->eee_active) {
407                 if (priv->eee_enabled) {
408                         netdev_dbg(priv->dev, "disable EEE\n");
409                         del_timer_sync(&priv->eee_ctrl_timer);
410                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
411                 }
412                 mutex_unlock(&priv->lock);
413                 return false;
414         }
415
416         if (priv->eee_active && !priv->eee_enabled) {
417                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
418                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
419                                      eee_tw_timer);
420         }
421
422         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
423
424         mutex_unlock(&priv->lock);
425         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
426         return true;
427 }
428
429 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
430  * @priv: driver private structure
431  * @p : descriptor pointer
432  * @skb : the socket buffer
433  * Description :
434  * This function will read timestamp from the descriptor & pass it to stack.
435  * and also perform some sanity checks.
436  */
437 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
438                                    struct dma_desc *p, struct sk_buff *skb)
439 {
440         struct skb_shared_hwtstamps shhwtstamp;
441         bool found = false;
442         u64 ns = 0;
443
444         if (!priv->hwts_tx_en)
445                 return;
446
447         /* exit if skb doesn't support hw tstamp */
448         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
449                 return;
450
451         /* check tx tstamp status */
452         if (stmmac_get_tx_timestamp_status(priv, p)) {
453                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
454                 found = true;
455         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
456                 found = true;
457         }
458
459         if (found) {
460                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
461                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
462
463                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
464                 /* pass tstamp to stack */
465                 skb_tstamp_tx(skb, &shhwtstamp);
466         }
467 }
468
469 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
470  * @priv: driver private structure
471  * @p : descriptor pointer
472  * @np : next descriptor pointer
473  * @skb : the socket buffer
474  * Description :
475  * This function will read received packet's timestamp from the descriptor
476  * and pass it to stack. It also perform some sanity checks.
477  */
478 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
479                                    struct dma_desc *np, struct sk_buff *skb)
480 {
481         struct skb_shared_hwtstamps *shhwtstamp = NULL;
482         struct dma_desc *desc = p;
483         u64 ns = 0;
484
485         if (!priv->hwts_rx_en)
486                 return;
487         /* For GMAC4, the valid timestamp is from CTX next desc. */
488         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
489                 desc = np;
490
491         /* Check if timestamp is available */
492         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
493                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
494                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
495                 shhwtstamp = skb_hwtstamps(skb);
496                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
497                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
498         } else  {
499                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
500         }
501 }
502
503 /**
504  *  stmmac_hwtstamp_set - control hardware timestamping.
505  *  @dev: device pointer.
506  *  @ifr: An IOCTL specific structure, that can contain a pointer to
507  *  a proprietary structure used to pass information to the driver.
508  *  Description:
509  *  This function configures the MAC to enable/disable both outgoing(TX)
510  *  and incoming(RX) packets time stamping based on user input.
511  *  Return Value:
512  *  0 on success and an appropriate -ve integer on failure.
513  */
514 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
515 {
516         struct stmmac_priv *priv = netdev_priv(dev);
517         struct hwtstamp_config config;
518         u32 ptp_v2 = 0;
519         u32 tstamp_all = 0;
520         u32 ptp_over_ipv4_udp = 0;
521         u32 ptp_over_ipv6_udp = 0;
522         u32 ptp_over_ethernet = 0;
523         u32 snap_type_sel = 0;
524         u32 ts_master_en = 0;
525         u32 ts_event_en = 0;
526
527         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
528                 netdev_alert(priv->dev, "No support for HW time stamping\n");
529                 priv->hwts_tx_en = 0;
530                 priv->hwts_rx_en = 0;
531
532                 return -EOPNOTSUPP;
533         }
534
535         if (copy_from_user(&config, ifr->ifr_data,
536                            sizeof(config)))
537                 return -EFAULT;
538
539         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
540                    __func__, config.flags, config.tx_type, config.rx_filter);
541
542         /* reserved for future extensions */
543         if (config.flags)
544                 return -EINVAL;
545
546         if (config.tx_type != HWTSTAMP_TX_OFF &&
547             config.tx_type != HWTSTAMP_TX_ON)
548                 return -ERANGE;
549
550         if (priv->adv_ts) {
551                 switch (config.rx_filter) {
552                 case HWTSTAMP_FILTER_NONE:
553                         /* time stamp no incoming packet at all */
554                         config.rx_filter = HWTSTAMP_FILTER_NONE;
555                         break;
556
557                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
558                         /* PTP v1, UDP, any kind of event packet */
559                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
560                         /* 'xmac' hardware can support Sync, Pdelay_Req and
561                          * Pdelay_resp by setting bit14 and bits17/16 to 01
562                          * This leaves Delay_Req timestamps out.
563                          * Enable all events *and* general purpose message
564                          * timestamping
565                          */
566                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
567                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
568                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
569                         break;
570
571                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
572                         /* PTP v1, UDP, Sync packet */
573                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
574                         /* take time stamp for SYNC messages only */
575                         ts_event_en = PTP_TCR_TSEVNTENA;
576
577                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
578                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
579                         break;
580
581                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
582                         /* PTP v1, UDP, Delay_req packet */
583                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
584                         /* take time stamp for Delay_Req messages only */
585                         ts_master_en = PTP_TCR_TSMSTRENA;
586                         ts_event_en = PTP_TCR_TSEVNTENA;
587
588                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
589                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
590                         break;
591
592                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
593                         /* PTP v2, UDP, any kind of event packet */
594                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
595                         ptp_v2 = PTP_TCR_TSVER2ENA;
596                         /* take time stamp for all event messages */
597                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
598
599                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
600                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
601                         break;
602
603                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
604                         /* PTP v2, UDP, Sync packet */
605                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
606                         ptp_v2 = PTP_TCR_TSVER2ENA;
607                         /* take time stamp for SYNC messages only */
608                         ts_event_en = PTP_TCR_TSEVNTENA;
609
610                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
611                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
612                         break;
613
614                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
615                         /* PTP v2, UDP, Delay_req packet */
616                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
617                         ptp_v2 = PTP_TCR_TSVER2ENA;
618                         /* take time stamp for Delay_Req messages only */
619                         ts_master_en = PTP_TCR_TSMSTRENA;
620                         ts_event_en = PTP_TCR_TSEVNTENA;
621
622                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
623                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
624                         break;
625
626                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
627                         /* PTP v2/802.AS1 any layer, any kind of event packet */
628                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
629                         ptp_v2 = PTP_TCR_TSVER2ENA;
630                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
631                         if (priv->synopsys_id < DWMAC_CORE_4_10)
632                                 ts_event_en = PTP_TCR_TSEVNTENA;
633                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
634                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
635                         ptp_over_ethernet = PTP_TCR_TSIPENA;
636                         break;
637
638                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
639                         /* PTP v2/802.AS1, any layer, Sync packet */
640                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
641                         ptp_v2 = PTP_TCR_TSVER2ENA;
642                         /* take time stamp for SYNC messages only */
643                         ts_event_en = PTP_TCR_TSEVNTENA;
644
645                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
646                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
647                         ptp_over_ethernet = PTP_TCR_TSIPENA;
648                         break;
649
650                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
651                         /* PTP v2/802.AS1, any layer, Delay_req packet */
652                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
653                         ptp_v2 = PTP_TCR_TSVER2ENA;
654                         /* take time stamp for Delay_Req messages only */
655                         ts_master_en = PTP_TCR_TSMSTRENA;
656                         ts_event_en = PTP_TCR_TSEVNTENA;
657
658                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
659                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
660                         ptp_over_ethernet = PTP_TCR_TSIPENA;
661                         break;
662
663                 case HWTSTAMP_FILTER_NTP_ALL:
664                 case HWTSTAMP_FILTER_ALL:
665                         /* time stamp any incoming packet */
666                         config.rx_filter = HWTSTAMP_FILTER_ALL;
667                         tstamp_all = PTP_TCR_TSENALL;
668                         break;
669
670                 default:
671                         return -ERANGE;
672                 }
673         } else {
674                 switch (config.rx_filter) {
675                 case HWTSTAMP_FILTER_NONE:
676                         config.rx_filter = HWTSTAMP_FILTER_NONE;
677                         break;
678                 default:
679                         /* PTP v1, UDP, any kind of event packet */
680                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
681                         break;
682                 }
683         }
684         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
685         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
686
687         priv->systime_flags = STMMAC_HWTS_ACTIVE;
688
689         if (priv->hwts_tx_en || priv->hwts_rx_en) {
690                 priv->systime_flags |= tstamp_all | ptp_v2 |
691                                        ptp_over_ethernet | ptp_over_ipv6_udp |
692                                        ptp_over_ipv4_udp | ts_event_en |
693                                        ts_master_en | snap_type_sel;
694         }
695
696         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
697
698         memcpy(&priv->tstamp_config, &config, sizeof(config));
699
700         return copy_to_user(ifr->ifr_data, &config,
701                             sizeof(config)) ? -EFAULT : 0;
702 }
703
704 /**
705  *  stmmac_hwtstamp_get - read hardware timestamping.
706  *  @dev: device pointer.
707  *  @ifr: An IOCTL specific structure, that can contain a pointer to
708  *  a proprietary structure used to pass information to the driver.
709  *  Description:
710  *  This function obtain the current hardware timestamping settings
711  *  as requested.
712  */
713 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
714 {
715         struct stmmac_priv *priv = netdev_priv(dev);
716         struct hwtstamp_config *config = &priv->tstamp_config;
717
718         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
719                 return -EOPNOTSUPP;
720
721         return copy_to_user(ifr->ifr_data, config,
722                             sizeof(*config)) ? -EFAULT : 0;
723 }
724
725 /**
726  * stmmac_init_tstamp_counter - init hardware timestamping counter
727  * @priv: driver private structure
728  * @systime_flags: timestamping flags
729  * Description:
730  * Initialize hardware counter for packet timestamping.
731  * This is valid as long as the interface is open and not suspended.
732  * Will be rerun after resuming from suspend, case in which the timestamping
733  * flags updated by stmmac_hwtstamp_set() also need to be restored.
734  */
735 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
736 {
737         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
738         struct timespec64 now;
739         u32 sec_inc = 0;
740         u64 temp = 0;
741
742         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
743                 return -EOPNOTSUPP;
744
745         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
746         priv->systime_flags = systime_flags;
747
748         /* program Sub Second Increment reg */
749         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
750                                            priv->plat->clk_ptp_rate,
751                                            xmac, &sec_inc);
752         temp = div_u64(1000000000ULL, sec_inc);
753
754         /* Store sub second increment for later use */
755         priv->sub_second_inc = sec_inc;
756
757         /* calculate default added value:
758          * formula is :
759          * addend = (2^32)/freq_div_ratio;
760          * where, freq_div_ratio = 1e9ns/sec_inc
761          */
762         temp = (u64)(temp << 32);
763         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
764         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
765
766         /* initialize system time */
767         ktime_get_real_ts64(&now);
768
769         /* lower 32 bits of tv_sec are safe until y2106 */
770         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
771
772         return 0;
773 }
774 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
775
776 /**
777  * stmmac_init_ptp - init PTP
778  * @priv: driver private structure
779  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
780  * This is done by looking at the HW cap. register.
781  * This function also registers the ptp driver.
782  */
783 static int stmmac_init_ptp(struct stmmac_priv *priv)
784 {
785         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
786         int ret;
787
788         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
789         if (ret)
790                 return ret;
791
792         priv->adv_ts = 0;
793         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
794         if (xmac && priv->dma_cap.atime_stamp)
795                 priv->adv_ts = 1;
796         /* Dwmac 3.x core with extend_desc can support adv_ts */
797         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
798                 priv->adv_ts = 1;
799
800         if (priv->dma_cap.time_stamp)
801                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
802
803         if (priv->adv_ts)
804                 netdev_info(priv->dev,
805                             "IEEE 1588-2008 Advanced Timestamp supported\n");
806
807         priv->hwts_tx_en = 0;
808         priv->hwts_rx_en = 0;
809
810         return 0;
811 }
812
813 static void stmmac_release_ptp(struct stmmac_priv *priv)
814 {
815         clk_disable_unprepare(priv->plat->clk_ptp_ref);
816         stmmac_ptp_unregister(priv);
817 }
818
819 /**
820  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
821  *  @priv: driver private structure
822  *  @duplex: duplex passed to the next function
823  *  Description: It is used for configuring the flow control in all queues
824  */
825 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
826 {
827         u32 tx_cnt = priv->plat->tx_queues_to_use;
828
829         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
830                         priv->pause, tx_cnt);
831 }
832
833 static void stmmac_validate(struct phylink_config *config,
834                             unsigned long *supported,
835                             struct phylink_link_state *state)
836 {
837         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
838         __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
839         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
840         int tx_cnt = priv->plat->tx_queues_to_use;
841         int max_speed = priv->plat->max_speed;
842
843         phylink_set(mac_supported, 10baseT_Half);
844         phylink_set(mac_supported, 10baseT_Full);
845         phylink_set(mac_supported, 100baseT_Half);
846         phylink_set(mac_supported, 100baseT_Full);
847         phylink_set(mac_supported, 1000baseT_Half);
848         phylink_set(mac_supported, 1000baseT_Full);
849         phylink_set(mac_supported, 1000baseKX_Full);
850
851         phylink_set(mac_supported, Autoneg);
852         phylink_set(mac_supported, Pause);
853         phylink_set(mac_supported, Asym_Pause);
854         phylink_set_port_modes(mac_supported);
855
856         /* Cut down 1G if asked to */
857         if ((max_speed > 0) && (max_speed < 1000)) {
858                 phylink_set(mask, 1000baseT_Full);
859                 phylink_set(mask, 1000baseX_Full);
860         } else if (priv->plat->has_xgmac) {
861                 if (!max_speed || (max_speed >= 2500)) {
862                         phylink_set(mac_supported, 2500baseT_Full);
863                         phylink_set(mac_supported, 2500baseX_Full);
864                 }
865                 if (!max_speed || (max_speed >= 5000)) {
866                         phylink_set(mac_supported, 5000baseT_Full);
867                 }
868                 if (!max_speed || (max_speed >= 10000)) {
869                         phylink_set(mac_supported, 10000baseSR_Full);
870                         phylink_set(mac_supported, 10000baseLR_Full);
871                         phylink_set(mac_supported, 10000baseER_Full);
872                         phylink_set(mac_supported, 10000baseLRM_Full);
873                         phylink_set(mac_supported, 10000baseT_Full);
874                         phylink_set(mac_supported, 10000baseKX4_Full);
875                         phylink_set(mac_supported, 10000baseKR_Full);
876                 }
877                 if (!max_speed || (max_speed >= 25000)) {
878                         phylink_set(mac_supported, 25000baseCR_Full);
879                         phylink_set(mac_supported, 25000baseKR_Full);
880                         phylink_set(mac_supported, 25000baseSR_Full);
881                 }
882                 if (!max_speed || (max_speed >= 40000)) {
883                         phylink_set(mac_supported, 40000baseKR4_Full);
884                         phylink_set(mac_supported, 40000baseCR4_Full);
885                         phylink_set(mac_supported, 40000baseSR4_Full);
886                         phylink_set(mac_supported, 40000baseLR4_Full);
887                 }
888                 if (!max_speed || (max_speed >= 50000)) {
889                         phylink_set(mac_supported, 50000baseCR2_Full);
890                         phylink_set(mac_supported, 50000baseKR2_Full);
891                         phylink_set(mac_supported, 50000baseSR2_Full);
892                         phylink_set(mac_supported, 50000baseKR_Full);
893                         phylink_set(mac_supported, 50000baseSR_Full);
894                         phylink_set(mac_supported, 50000baseCR_Full);
895                         phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
896                         phylink_set(mac_supported, 50000baseDR_Full);
897                 }
898                 if (!max_speed || (max_speed >= 100000)) {
899                         phylink_set(mac_supported, 100000baseKR4_Full);
900                         phylink_set(mac_supported, 100000baseSR4_Full);
901                         phylink_set(mac_supported, 100000baseCR4_Full);
902                         phylink_set(mac_supported, 100000baseLR4_ER4_Full);
903                         phylink_set(mac_supported, 100000baseKR2_Full);
904                         phylink_set(mac_supported, 100000baseSR2_Full);
905                         phylink_set(mac_supported, 100000baseCR2_Full);
906                         phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
907                         phylink_set(mac_supported, 100000baseDR2_Full);
908                 }
909         }
910
911         /* Half-Duplex can only work with single queue */
912         if (tx_cnt > 1) {
913                 phylink_set(mask, 10baseT_Half);
914                 phylink_set(mask, 100baseT_Half);
915                 phylink_set(mask, 1000baseT_Half);
916         }
917
918         linkmode_and(supported, supported, mac_supported);
919         linkmode_andnot(supported, supported, mask);
920
921         linkmode_and(state->advertising, state->advertising, mac_supported);
922         linkmode_andnot(state->advertising, state->advertising, mask);
923
924         /* If PCS is supported, check which modes it supports. */
925         stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
926 }
927
928 static void stmmac_mac_pcs_get_state(struct phylink_config *config,
929                                      struct phylink_link_state *state)
930 {
931         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
932
933         state->link = 0;
934         stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
935 }
936
937 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
938                               const struct phylink_link_state *state)
939 {
940         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
941
942         stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
943 }
944
945 static void stmmac_mac_an_restart(struct phylink_config *config)
946 {
947         /* Not Supported */
948 }
949
950 static void stmmac_mac_link_down(struct phylink_config *config,
951                                  unsigned int mode, phy_interface_t interface)
952 {
953         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
954
955         stmmac_mac_set(priv, priv->ioaddr, false);
956         priv->eee_active = false;
957         priv->tx_lpi_enabled = false;
958         stmmac_eee_init(priv);
959         stmmac_set_eee_pls(priv, priv->hw, false);
960 }
961
962 static void stmmac_mac_link_up(struct phylink_config *config,
963                                struct phy_device *phy,
964                                unsigned int mode, phy_interface_t interface,
965                                int speed, int duplex,
966                                bool tx_pause, bool rx_pause)
967 {
968         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
969         u32 ctrl;
970
971         stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
972
973         ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
974         ctrl &= ~priv->hw->link.speed_mask;
975
976         if (interface == PHY_INTERFACE_MODE_USXGMII) {
977                 switch (speed) {
978                 case SPEED_10000:
979                         ctrl |= priv->hw->link.xgmii.speed10000;
980                         break;
981                 case SPEED_5000:
982                         ctrl |= priv->hw->link.xgmii.speed5000;
983                         break;
984                 case SPEED_2500:
985                         ctrl |= priv->hw->link.xgmii.speed2500;
986                         break;
987                 default:
988                         return;
989                 }
990         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
991                 switch (speed) {
992                 case SPEED_100000:
993                         ctrl |= priv->hw->link.xlgmii.speed100000;
994                         break;
995                 case SPEED_50000:
996                         ctrl |= priv->hw->link.xlgmii.speed50000;
997                         break;
998                 case SPEED_40000:
999                         ctrl |= priv->hw->link.xlgmii.speed40000;
1000                         break;
1001                 case SPEED_25000:
1002                         ctrl |= priv->hw->link.xlgmii.speed25000;
1003                         break;
1004                 case SPEED_10000:
1005                         ctrl |= priv->hw->link.xgmii.speed10000;
1006                         break;
1007                 case SPEED_2500:
1008                         ctrl |= priv->hw->link.speed2500;
1009                         break;
1010                 case SPEED_1000:
1011                         ctrl |= priv->hw->link.speed1000;
1012                         break;
1013                 default:
1014                         return;
1015                 }
1016         } else {
1017                 switch (speed) {
1018                 case SPEED_2500:
1019                         ctrl |= priv->hw->link.speed2500;
1020                         break;
1021                 case SPEED_1000:
1022                         ctrl |= priv->hw->link.speed1000;
1023                         break;
1024                 case SPEED_100:
1025                         ctrl |= priv->hw->link.speed100;
1026                         break;
1027                 case SPEED_10:
1028                         ctrl |= priv->hw->link.speed10;
1029                         break;
1030                 default:
1031                         return;
1032                 }
1033         }
1034
1035         priv->speed = speed;
1036
1037         if (priv->plat->fix_mac_speed)
1038                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1039
1040         if (!duplex)
1041                 ctrl &= ~priv->hw->link.duplex;
1042         else
1043                 ctrl |= priv->hw->link.duplex;
1044
1045         /* Flow Control operation */
1046         if (rx_pause && tx_pause)
1047                 priv->flow_ctrl = FLOW_AUTO;
1048         else if (rx_pause && !tx_pause)
1049                 priv->flow_ctrl = FLOW_RX;
1050         else if (!rx_pause && tx_pause)
1051                 priv->flow_ctrl = FLOW_TX;
1052         else
1053                 priv->flow_ctrl = FLOW_OFF;
1054
1055         stmmac_mac_flow_ctrl(priv, duplex);
1056
1057         writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1058
1059         stmmac_mac_set(priv, priv->ioaddr, true);
1060         if (phy && priv->dma_cap.eee) {
1061                 priv->eee_active =
1062                         phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
1063                 priv->eee_enabled = stmmac_eee_init(priv);
1064                 priv->tx_lpi_enabled = priv->eee_enabled;
1065                 stmmac_set_eee_pls(priv, priv->hw, true);
1066         }
1067 }
1068
1069 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1070         .validate = stmmac_validate,
1071         .mac_pcs_get_state = stmmac_mac_pcs_get_state,
1072         .mac_config = stmmac_mac_config,
1073         .mac_an_restart = stmmac_mac_an_restart,
1074         .mac_link_down = stmmac_mac_link_down,
1075         .mac_link_up = stmmac_mac_link_up,
1076 };
1077
1078 /**
1079  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1080  * @priv: driver private structure
1081  * Description: this is to verify if the HW supports the PCS.
1082  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1083  * configured for the TBI, RTBI, or SGMII PHY interface.
1084  */
1085 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1086 {
1087         int interface = priv->plat->interface;
1088
1089         if (priv->dma_cap.pcs) {
1090                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1091                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1092                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1093                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1094                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1095                         priv->hw->pcs = STMMAC_PCS_RGMII;
1096                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1097                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1098                         priv->hw->pcs = STMMAC_PCS_SGMII;
1099                 }
1100         }
1101 }
1102
1103 /**
1104  * stmmac_init_phy - PHY initialization
1105  * @dev: net device structure
1106  * Description: it initializes the driver's PHY state, and attaches the PHY
1107  * to the mac driver.
1108  *  Return value:
1109  *  0 on success
1110  */
1111 static int stmmac_init_phy(struct net_device *dev)
1112 {
1113         struct stmmac_priv *priv = netdev_priv(dev);
1114         struct device_node *node;
1115         int ret;
1116
1117         node = priv->plat->phylink_node;
1118
1119         if (node)
1120                 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1121
1122         /* Some DT bindings do not set-up the PHY handle. Let's try to
1123          * manually parse it
1124          */
1125         if (!node || ret) {
1126                 int addr = priv->plat->phy_addr;
1127                 struct phy_device *phydev;
1128
1129                 if (addr < 0) {
1130                         netdev_err(priv->dev, "no phy found\n");
1131                         return -ENODEV;
1132                 }
1133
1134                 phydev = mdiobus_get_phy(priv->mii, addr);
1135                 if (!phydev) {
1136                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1137                         return -ENODEV;
1138                 }
1139
1140                 ret = phylink_connect_phy(priv->phylink, phydev);
1141         }
1142
1143         if (!priv->plat->pmt) {
1144                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1145
1146                 phylink_ethtool_get_wol(priv->phylink, &wol);
1147                 device_set_wakeup_capable(priv->device, !!wol.supported);
1148                 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1149         }
1150
1151         return ret;
1152 }
1153
1154 static int stmmac_phy_setup(struct stmmac_priv *priv)
1155 {
1156         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1157         int mode = priv->plat->phy_interface;
1158         struct phylink *phylink;
1159
1160         priv->phylink_config.dev = &priv->dev->dev;
1161         priv->phylink_config.type = PHYLINK_NETDEV;
1162         priv->phylink_config.pcs_poll = true;
1163
1164         if (!fwnode)
1165                 fwnode = dev_fwnode(priv->device);
1166
1167         phylink = phylink_create(&priv->phylink_config, fwnode,
1168                                  mode, &stmmac_phylink_mac_ops);
1169         if (IS_ERR(phylink))
1170                 return PTR_ERR(phylink);
1171
1172         priv->phylink = phylink;
1173         return 0;
1174 }
1175
1176 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1177 {
1178         u32 rx_cnt = priv->plat->rx_queues_to_use;
1179         unsigned int desc_size;
1180         void *head_rx;
1181         u32 queue;
1182
1183         /* Display RX rings */
1184         for (queue = 0; queue < rx_cnt; queue++) {
1185                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1186
1187                 pr_info("\tRX Queue %u rings\n", queue);
1188
1189                 if (priv->extend_desc) {
1190                         head_rx = (void *)rx_q->dma_erx;
1191                         desc_size = sizeof(struct dma_extended_desc);
1192                 } else {
1193                         head_rx = (void *)rx_q->dma_rx;
1194                         desc_size = sizeof(struct dma_desc);
1195                 }
1196
1197                 /* Display RX ring */
1198                 stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1199                                     rx_q->dma_rx_phy, desc_size);
1200         }
1201 }
1202
1203 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1204 {
1205         u32 tx_cnt = priv->plat->tx_queues_to_use;
1206         unsigned int desc_size;
1207         void *head_tx;
1208         u32 queue;
1209
1210         /* Display TX rings */
1211         for (queue = 0; queue < tx_cnt; queue++) {
1212                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1213
1214                 pr_info("\tTX Queue %d rings\n", queue);
1215
1216                 if (priv->extend_desc) {
1217                         head_tx = (void *)tx_q->dma_etx;
1218                         desc_size = sizeof(struct dma_extended_desc);
1219                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1220                         head_tx = (void *)tx_q->dma_entx;
1221                         desc_size = sizeof(struct dma_edesc);
1222                 } else {
1223                         head_tx = (void *)tx_q->dma_tx;
1224                         desc_size = sizeof(struct dma_desc);
1225                 }
1226
1227                 stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1228                                     tx_q->dma_tx_phy, desc_size);
1229         }
1230 }
1231
1232 static void stmmac_display_rings(struct stmmac_priv *priv)
1233 {
1234         /* Display RX ring */
1235         stmmac_display_rx_rings(priv);
1236
1237         /* Display TX ring */
1238         stmmac_display_tx_rings(priv);
1239 }
1240
1241 static int stmmac_set_bfsize(int mtu, int bufsize)
1242 {
1243         int ret = bufsize;
1244
1245         if (mtu >= BUF_SIZE_8KiB)
1246                 ret = BUF_SIZE_16KiB;
1247         else if (mtu >= BUF_SIZE_4KiB)
1248                 ret = BUF_SIZE_8KiB;
1249         else if (mtu >= BUF_SIZE_2KiB)
1250                 ret = BUF_SIZE_4KiB;
1251         else if (mtu > DEFAULT_BUFSIZE)
1252                 ret = BUF_SIZE_2KiB;
1253         else
1254                 ret = DEFAULT_BUFSIZE;
1255
1256         return ret;
1257 }
1258
1259 /**
1260  * stmmac_clear_rx_descriptors - clear RX descriptors
1261  * @priv: driver private structure
1262  * @queue: RX queue index
1263  * Description: this function is called to clear the RX descriptors
1264  * in case of both basic and extended descriptors are used.
1265  */
1266 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1267 {
1268         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1269         int i;
1270
1271         /* Clear the RX descriptors */
1272         for (i = 0; i < priv->dma_rx_size; i++)
1273                 if (priv->extend_desc)
1274                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1275                                         priv->use_riwt, priv->mode,
1276                                         (i == priv->dma_rx_size - 1),
1277                                         priv->dma_buf_sz);
1278                 else
1279                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1280                                         priv->use_riwt, priv->mode,
1281                                         (i == priv->dma_rx_size - 1),
1282                                         priv->dma_buf_sz);
1283 }
1284
1285 /**
1286  * stmmac_clear_tx_descriptors - clear tx descriptors
1287  * @priv: driver private structure
1288  * @queue: TX queue index.
1289  * Description: this function is called to clear the TX descriptors
1290  * in case of both basic and extended descriptors are used.
1291  */
1292 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1293 {
1294         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1295         int i;
1296
1297         /* Clear the TX descriptors */
1298         for (i = 0; i < priv->dma_tx_size; i++) {
1299                 int last = (i == (priv->dma_tx_size - 1));
1300                 struct dma_desc *p;
1301
1302                 if (priv->extend_desc)
1303                         p = &tx_q->dma_etx[i].basic;
1304                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1305                         p = &tx_q->dma_entx[i].basic;
1306                 else
1307                         p = &tx_q->dma_tx[i];
1308
1309                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1310         }
1311 }
1312
1313 /**
1314  * stmmac_clear_descriptors - clear descriptors
1315  * @priv: driver private structure
1316  * Description: this function is called to clear the TX and RX descriptors
1317  * in case of both basic and extended descriptors are used.
1318  */
1319 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1320 {
1321         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1322         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1323         u32 queue;
1324
1325         /* Clear the RX descriptors */
1326         for (queue = 0; queue < rx_queue_cnt; queue++)
1327                 stmmac_clear_rx_descriptors(priv, queue);
1328
1329         /* Clear the TX descriptors */
1330         for (queue = 0; queue < tx_queue_cnt; queue++)
1331                 stmmac_clear_tx_descriptors(priv, queue);
1332 }
1333
1334 /**
1335  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1336  * @priv: driver private structure
1337  * @p: descriptor pointer
1338  * @i: descriptor index
1339  * @flags: gfp flag
1340  * @queue: RX queue index
1341  * Description: this function is called to allocate a receive buffer, perform
1342  * the DMA mapping and init the descriptor.
1343  */
1344 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1345                                   int i, gfp_t flags, u32 queue)
1346 {
1347         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1348         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1349
1350         buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1351         if (!buf->page)
1352                 return -ENOMEM;
1353
1354         if (priv->sph) {
1355                 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1356                 if (!buf->sec_page)
1357                         return -ENOMEM;
1358
1359                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1360                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1361         } else {
1362                 buf->sec_page = NULL;
1363                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1364         }
1365
1366         buf->addr = page_pool_get_dma_addr(buf->page);
1367         stmmac_set_desc_addr(priv, p, buf->addr);
1368         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1369                 stmmac_init_desc3(priv, p);
1370
1371         return 0;
1372 }
1373
1374 /**
1375  * stmmac_free_rx_buffer - free RX dma buffers
1376  * @priv: private structure
1377  * @queue: RX queue index
1378  * @i: buffer index.
1379  */
1380 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1381 {
1382         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1383         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1384
1385         if (buf->page)
1386                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1387         buf->page = NULL;
1388
1389         if (buf->sec_page)
1390                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1391         buf->sec_page = NULL;
1392 }
1393
1394 /**
1395  * stmmac_free_tx_buffer - free RX dma buffers
1396  * @priv: private structure
1397  * @queue: RX queue index
1398  * @i: buffer index.
1399  */
1400 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1401 {
1402         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1403
1404         if (tx_q->tx_skbuff_dma[i].buf) {
1405                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1406                         dma_unmap_page(priv->device,
1407                                        tx_q->tx_skbuff_dma[i].buf,
1408                                        tx_q->tx_skbuff_dma[i].len,
1409                                        DMA_TO_DEVICE);
1410                 else
1411                         dma_unmap_single(priv->device,
1412                                          tx_q->tx_skbuff_dma[i].buf,
1413                                          tx_q->tx_skbuff_dma[i].len,
1414                                          DMA_TO_DEVICE);
1415         }
1416
1417         if (tx_q->tx_skbuff[i]) {
1418                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1419                 tx_q->tx_skbuff[i] = NULL;
1420                 tx_q->tx_skbuff_dma[i].buf = 0;
1421                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1422         }
1423 }
1424
1425 /**
1426  * init_dma_rx_desc_rings - init the RX descriptor rings
1427  * @dev: net device structure
1428  * @flags: gfp flag.
1429  * Description: this function initializes the DMA RX descriptors
1430  * and allocates the socket buffers. It supports the chained and ring
1431  * modes.
1432  */
1433 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1434 {
1435         struct stmmac_priv *priv = netdev_priv(dev);
1436         u32 rx_count = priv->plat->rx_queues_to_use;
1437         int ret = -ENOMEM;
1438         int queue;
1439         int i;
1440
1441         /* RX INITIALIZATION */
1442         netif_dbg(priv, probe, priv->dev,
1443                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1444
1445         for (queue = 0; queue < rx_count; queue++) {
1446                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1447
1448                 netif_dbg(priv, probe, priv->dev,
1449                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1450                           (u32)rx_q->dma_rx_phy);
1451
1452                 stmmac_clear_rx_descriptors(priv, queue);
1453
1454                 for (i = 0; i < priv->dma_rx_size; i++) {
1455                         struct dma_desc *p;
1456
1457                         if (priv->extend_desc)
1458                                 p = &((rx_q->dma_erx + i)->basic);
1459                         else
1460                                 p = rx_q->dma_rx + i;
1461
1462                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1463                                                      queue);
1464                         if (ret)
1465                                 goto err_init_rx_buffers;
1466                 }
1467
1468                 rx_q->cur_rx = 0;
1469                 rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
1470
1471                 /* Setup the chained descriptor addresses */
1472                 if (priv->mode == STMMAC_CHAIN_MODE) {
1473                         if (priv->extend_desc)
1474                                 stmmac_mode_init(priv, rx_q->dma_erx,
1475                                                  rx_q->dma_rx_phy,
1476                                                  priv->dma_rx_size, 1);
1477                         else
1478                                 stmmac_mode_init(priv, rx_q->dma_rx,
1479                                                  rx_q->dma_rx_phy,
1480                                                  priv->dma_rx_size, 0);
1481                 }
1482         }
1483
1484         return 0;
1485
1486 err_init_rx_buffers:
1487         while (queue >= 0) {
1488                 while (--i >= 0)
1489                         stmmac_free_rx_buffer(priv, queue, i);
1490
1491                 if (queue == 0)
1492                         break;
1493
1494                 i = priv->dma_rx_size;
1495                 queue--;
1496         }
1497
1498         return ret;
1499 }
1500
1501 /**
1502  * init_dma_tx_desc_rings - init the TX descriptor rings
1503  * @dev: net device structure.
1504  * Description: this function initializes the DMA TX descriptors
1505  * and allocates the socket buffers. It supports the chained and ring
1506  * modes.
1507  */
1508 static int init_dma_tx_desc_rings(struct net_device *dev)
1509 {
1510         struct stmmac_priv *priv = netdev_priv(dev);
1511         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1512         u32 queue;
1513         int i;
1514
1515         for (queue = 0; queue < tx_queue_cnt; queue++) {
1516                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1517
1518                 netif_dbg(priv, probe, priv->dev,
1519                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1520                          (u32)tx_q->dma_tx_phy);
1521
1522                 /* Setup the chained descriptor addresses */
1523                 if (priv->mode == STMMAC_CHAIN_MODE) {
1524                         if (priv->extend_desc)
1525                                 stmmac_mode_init(priv, tx_q->dma_etx,
1526                                                  tx_q->dma_tx_phy,
1527                                                  priv->dma_tx_size, 1);
1528                         else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1529                                 stmmac_mode_init(priv, tx_q->dma_tx,
1530                                                  tx_q->dma_tx_phy,
1531                                                  priv->dma_tx_size, 0);
1532                 }
1533
1534                 for (i = 0; i < priv->dma_tx_size; i++) {
1535                         struct dma_desc *p;
1536                         if (priv->extend_desc)
1537                                 p = &((tx_q->dma_etx + i)->basic);
1538                         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1539                                 p = &((tx_q->dma_entx + i)->basic);
1540                         else
1541                                 p = tx_q->dma_tx + i;
1542
1543                         stmmac_clear_desc(priv, p);
1544
1545                         tx_q->tx_skbuff_dma[i].buf = 0;
1546                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1547                         tx_q->tx_skbuff_dma[i].len = 0;
1548                         tx_q->tx_skbuff_dma[i].last_segment = false;
1549                         tx_q->tx_skbuff[i] = NULL;
1550                 }
1551
1552                 tx_q->dirty_tx = 0;
1553                 tx_q->cur_tx = 0;
1554                 tx_q->mss = 0;
1555
1556                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1557         }
1558
1559         return 0;
1560 }
1561
1562 /**
1563  * init_dma_desc_rings - init the RX/TX descriptor rings
1564  * @dev: net device structure
1565  * @flags: gfp flag.
1566  * Description: this function initializes the DMA RX/TX descriptors
1567  * and allocates the socket buffers. It supports the chained and ring
1568  * modes.
1569  */
1570 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1571 {
1572         struct stmmac_priv *priv = netdev_priv(dev);
1573         int ret;
1574
1575         ret = init_dma_rx_desc_rings(dev, flags);
1576         if (ret)
1577                 return ret;
1578
1579         ret = init_dma_tx_desc_rings(dev);
1580
1581         stmmac_clear_descriptors(priv);
1582
1583         if (netif_msg_hw(priv))
1584                 stmmac_display_rings(priv);
1585
1586         return ret;
1587 }
1588
1589 /**
1590  * dma_free_rx_skbufs - free RX dma buffers
1591  * @priv: private structure
1592  * @queue: RX queue index
1593  */
1594 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1595 {
1596         int i;
1597
1598         for (i = 0; i < priv->dma_rx_size; i++)
1599                 stmmac_free_rx_buffer(priv, queue, i);
1600 }
1601
1602 /**
1603  * dma_free_tx_skbufs - free TX dma buffers
1604  * @priv: private structure
1605  * @queue: TX queue index
1606  */
1607 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1608 {
1609         int i;
1610
1611         for (i = 0; i < priv->dma_tx_size; i++)
1612                 stmmac_free_tx_buffer(priv, queue, i);
1613 }
1614
1615 /**
1616  * stmmac_free_tx_skbufs - free TX skb buffers
1617  * @priv: private structure
1618  */
1619 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1620 {
1621         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1622         u32 queue;
1623
1624         for (queue = 0; queue < tx_queue_cnt; queue++)
1625                 dma_free_tx_skbufs(priv, queue);
1626 }
1627
1628 /**
1629  * free_dma_rx_desc_resources - free RX dma desc resources
1630  * @priv: private structure
1631  */
1632 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1633 {
1634         u32 rx_count = priv->plat->rx_queues_to_use;
1635         u32 queue;
1636
1637         /* Free RX queue resources */
1638         for (queue = 0; queue < rx_count; queue++) {
1639                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1640
1641                 /* Release the DMA RX socket buffers */
1642                 dma_free_rx_skbufs(priv, queue);
1643
1644                 /* Free DMA regions of consistent memory previously allocated */
1645                 if (!priv->extend_desc)
1646                         dma_free_coherent(priv->device, priv->dma_rx_size *
1647                                           sizeof(struct dma_desc),
1648                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1649                 else
1650                         dma_free_coherent(priv->device, priv->dma_rx_size *
1651                                           sizeof(struct dma_extended_desc),
1652                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1653
1654                 kfree(rx_q->buf_pool);
1655                 if (rx_q->page_pool)
1656                         page_pool_destroy(rx_q->page_pool);
1657         }
1658 }
1659
1660 /**
1661  * free_dma_tx_desc_resources - free TX dma desc resources
1662  * @priv: private structure
1663  */
1664 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1665 {
1666         u32 tx_count = priv->plat->tx_queues_to_use;
1667         u32 queue;
1668
1669         /* Free TX queue resources */
1670         for (queue = 0; queue < tx_count; queue++) {
1671                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1672                 size_t size;
1673                 void *addr;
1674
1675                 /* Release the DMA TX socket buffers */
1676                 dma_free_tx_skbufs(priv, queue);
1677
1678                 if (priv->extend_desc) {
1679                         size = sizeof(struct dma_extended_desc);
1680                         addr = tx_q->dma_etx;
1681                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1682                         size = sizeof(struct dma_edesc);
1683                         addr = tx_q->dma_entx;
1684                 } else {
1685                         size = sizeof(struct dma_desc);
1686                         addr = tx_q->dma_tx;
1687                 }
1688
1689                 size *= priv->dma_tx_size;
1690
1691                 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1692
1693                 kfree(tx_q->tx_skbuff_dma);
1694                 kfree(tx_q->tx_skbuff);
1695         }
1696 }
1697
1698 /**
1699  * alloc_dma_rx_desc_resources - alloc RX resources.
1700  * @priv: private structure
1701  * Description: according to which descriptor can be used (extend or basic)
1702  * this function allocates the resources for TX and RX paths. In case of
1703  * reception, for example, it pre-allocated the RX socket buffer in order to
1704  * allow zero-copy mechanism.
1705  */
1706 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1707 {
1708         u32 rx_count = priv->plat->rx_queues_to_use;
1709         int ret = -ENOMEM;
1710         u32 queue;
1711
1712         /* RX queues buffers and DMA */
1713         for (queue = 0; queue < rx_count; queue++) {
1714                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1715                 struct page_pool_params pp_params = { 0 };
1716                 unsigned int num_pages;
1717
1718                 rx_q->queue_index = queue;
1719                 rx_q->priv_data = priv;
1720
1721                 pp_params.flags = PP_FLAG_DMA_MAP;
1722                 pp_params.pool_size = priv->dma_rx_size;
1723                 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1724                 pp_params.order = ilog2(num_pages);
1725                 pp_params.nid = dev_to_node(priv->device);
1726                 pp_params.dev = priv->device;
1727                 pp_params.dma_dir = DMA_FROM_DEVICE;
1728
1729                 rx_q->page_pool = page_pool_create(&pp_params);
1730                 if (IS_ERR(rx_q->page_pool)) {
1731                         ret = PTR_ERR(rx_q->page_pool);
1732                         rx_q->page_pool = NULL;
1733                         goto err_dma;
1734                 }
1735
1736                 rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1737                                          sizeof(*rx_q->buf_pool),
1738                                          GFP_KERNEL);
1739                 if (!rx_q->buf_pool)
1740                         goto err_dma;
1741
1742                 if (priv->extend_desc) {
1743                         rx_q->dma_erx = dma_alloc_coherent(priv->device,
1744                                                            priv->dma_rx_size *
1745                                                            sizeof(struct dma_extended_desc),
1746                                                            &rx_q->dma_rx_phy,
1747                                                            GFP_KERNEL);
1748                         if (!rx_q->dma_erx)
1749                                 goto err_dma;
1750
1751                 } else {
1752                         rx_q->dma_rx = dma_alloc_coherent(priv->device,
1753                                                           priv->dma_rx_size *
1754                                                           sizeof(struct dma_desc),
1755                                                           &rx_q->dma_rx_phy,
1756                                                           GFP_KERNEL);
1757                         if (!rx_q->dma_rx)
1758                                 goto err_dma;
1759                 }
1760         }
1761
1762         return 0;
1763
1764 err_dma:
1765         free_dma_rx_desc_resources(priv);
1766
1767         return ret;
1768 }
1769
1770 /**
1771  * alloc_dma_tx_desc_resources - alloc TX resources.
1772  * @priv: private structure
1773  * Description: according to which descriptor can be used (extend or basic)
1774  * this function allocates the resources for TX and RX paths. In case of
1775  * reception, for example, it pre-allocated the RX socket buffer in order to
1776  * allow zero-copy mechanism.
1777  */
1778 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1779 {
1780         u32 tx_count = priv->plat->tx_queues_to_use;
1781         int ret = -ENOMEM;
1782         u32 queue;
1783
1784         /* TX queues buffers and DMA */
1785         for (queue = 0; queue < tx_count; queue++) {
1786                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1787                 size_t size;
1788                 void *addr;
1789
1790                 tx_q->queue_index = queue;
1791                 tx_q->priv_data = priv;
1792
1793                 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1794                                               sizeof(*tx_q->tx_skbuff_dma),
1795                                               GFP_KERNEL);
1796                 if (!tx_q->tx_skbuff_dma)
1797                         goto err_dma;
1798
1799                 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1800                                           sizeof(struct sk_buff *),
1801                                           GFP_KERNEL);
1802                 if (!tx_q->tx_skbuff)
1803                         goto err_dma;
1804
1805                 if (priv->extend_desc)
1806                         size = sizeof(struct dma_extended_desc);
1807                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1808                         size = sizeof(struct dma_edesc);
1809                 else
1810                         size = sizeof(struct dma_desc);
1811
1812                 size *= priv->dma_tx_size;
1813
1814                 addr = dma_alloc_coherent(priv->device, size,
1815                                           &tx_q->dma_tx_phy, GFP_KERNEL);
1816                 if (!addr)
1817                         goto err_dma;
1818
1819                 if (priv->extend_desc)
1820                         tx_q->dma_etx = addr;
1821                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1822                         tx_q->dma_entx = addr;
1823                 else
1824                         tx_q->dma_tx = addr;
1825         }
1826
1827         return 0;
1828
1829 err_dma:
1830         free_dma_tx_desc_resources(priv);
1831         return ret;
1832 }
1833
1834 /**
1835  * alloc_dma_desc_resources - alloc TX/RX resources.
1836  * @priv: private structure
1837  * Description: according to which descriptor can be used (extend or basic)
1838  * this function allocates the resources for TX and RX paths. In case of
1839  * reception, for example, it pre-allocated the RX socket buffer in order to
1840  * allow zero-copy mechanism.
1841  */
1842 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1843 {
1844         /* RX Allocation */
1845         int ret = alloc_dma_rx_desc_resources(priv);
1846
1847         if (ret)
1848                 return ret;
1849
1850         ret = alloc_dma_tx_desc_resources(priv);
1851
1852         return ret;
1853 }
1854
1855 /**
1856  * free_dma_desc_resources - free dma desc resources
1857  * @priv: private structure
1858  */
1859 static void free_dma_desc_resources(struct stmmac_priv *priv)
1860 {
1861         /* Release the DMA RX socket buffers */
1862         free_dma_rx_desc_resources(priv);
1863
1864         /* Release the DMA TX socket buffers */
1865         free_dma_tx_desc_resources(priv);
1866 }
1867
1868 /**
1869  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1870  *  @priv: driver private structure
1871  *  Description: It is used for enabling the rx queues in the MAC
1872  */
1873 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1874 {
1875         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1876         int queue;
1877         u8 mode;
1878
1879         for (queue = 0; queue < rx_queues_count; queue++) {
1880                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1881                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1882         }
1883 }
1884
1885 /**
1886  * stmmac_start_rx_dma - start RX DMA channel
1887  * @priv: driver private structure
1888  * @chan: RX channel index
1889  * Description:
1890  * This starts a RX DMA channel
1891  */
1892 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1893 {
1894         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1895         stmmac_start_rx(priv, priv->ioaddr, chan);
1896 }
1897
1898 /**
1899  * stmmac_start_tx_dma - start TX DMA channel
1900  * @priv: driver private structure
1901  * @chan: TX channel index
1902  * Description:
1903  * This starts a TX DMA channel
1904  */
1905 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1906 {
1907         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1908         stmmac_start_tx(priv, priv->ioaddr, chan);
1909 }
1910
1911 /**
1912  * stmmac_stop_rx_dma - stop RX DMA channel
1913  * @priv: driver private structure
1914  * @chan: RX channel index
1915  * Description:
1916  * This stops a RX DMA channel
1917  */
1918 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1919 {
1920         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1921         stmmac_stop_rx(priv, priv->ioaddr, chan);
1922 }
1923
1924 /**
1925  * stmmac_stop_tx_dma - stop TX DMA channel
1926  * @priv: driver private structure
1927  * @chan: TX channel index
1928  * Description:
1929  * This stops a TX DMA channel
1930  */
1931 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1932 {
1933         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1934         stmmac_stop_tx(priv, priv->ioaddr, chan);
1935 }
1936
1937 /**
1938  * stmmac_start_all_dma - start all RX and TX DMA channels
1939  * @priv: driver private structure
1940  * Description:
1941  * This starts all the RX and TX DMA channels
1942  */
1943 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1944 {
1945         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1946         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1947         u32 chan = 0;
1948
1949         for (chan = 0; chan < rx_channels_count; chan++)
1950                 stmmac_start_rx_dma(priv, chan);
1951
1952         for (chan = 0; chan < tx_channels_count; chan++)
1953                 stmmac_start_tx_dma(priv, chan);
1954 }
1955
1956 /**
1957  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1958  * @priv: driver private structure
1959  * Description:
1960  * This stops the RX and TX DMA channels
1961  */
1962 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1963 {
1964         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1965         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1966         u32 chan = 0;
1967
1968         for (chan = 0; chan < rx_channels_count; chan++)
1969                 stmmac_stop_rx_dma(priv, chan);
1970
1971         for (chan = 0; chan < tx_channels_count; chan++)
1972                 stmmac_stop_tx_dma(priv, chan);
1973 }
1974
1975 /**
1976  *  stmmac_dma_operation_mode - HW DMA operation mode
1977  *  @priv: driver private structure
1978  *  Description: it is used for configuring the DMA operation mode register in
1979  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1980  */
1981 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1982 {
1983         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1984         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1985         int rxfifosz = priv->plat->rx_fifo_size;
1986         int txfifosz = priv->plat->tx_fifo_size;
1987         u32 txmode = 0;
1988         u32 rxmode = 0;
1989         u32 chan = 0;
1990         u8 qmode = 0;
1991
1992         if (rxfifosz == 0)
1993                 rxfifosz = priv->dma_cap.rx_fifo_size;
1994         if (txfifosz == 0)
1995                 txfifosz = priv->dma_cap.tx_fifo_size;
1996
1997         /* Adjust for real per queue fifo size */
1998         rxfifosz /= rx_channels_count;
1999         txfifosz /= tx_channels_count;
2000
2001         if (priv->plat->force_thresh_dma_mode) {
2002                 txmode = tc;
2003                 rxmode = tc;
2004         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2005                 /*
2006                  * In case of GMAC, SF mode can be enabled
2007                  * to perform the TX COE in HW. This depends on:
2008                  * 1) TX COE if actually supported
2009                  * 2) There is no bugged Jumbo frame support
2010                  *    that needs to not insert csum in the TDES.
2011                  */
2012                 txmode = SF_DMA_MODE;
2013                 rxmode = SF_DMA_MODE;
2014                 priv->xstats.threshold = SF_DMA_MODE;
2015         } else {
2016                 txmode = tc;
2017                 rxmode = SF_DMA_MODE;
2018         }
2019
2020         /* configure all channels */
2021         for (chan = 0; chan < rx_channels_count; chan++) {
2022                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2023
2024                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2025                                 rxfifosz, qmode);
2026                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
2027                                 chan);
2028         }
2029
2030         for (chan = 0; chan < tx_channels_count; chan++) {
2031                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2032
2033                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2034                                 txfifosz, qmode);
2035         }
2036 }
2037
2038 /**
2039  * stmmac_tx_clean - to manage the transmission completion
2040  * @priv: driver private structure
2041  * @budget: napi budget limiting this functions packet handling
2042  * @queue: TX queue index
2043  * Description: it reclaims the transmit resources after transmission completes.
2044  */
2045 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2046 {
2047         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2048         unsigned int bytes_compl = 0, pkts_compl = 0;
2049         unsigned int entry, count = 0;
2050
2051         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2052
2053         priv->xstats.tx_clean++;
2054
2055         entry = tx_q->dirty_tx;
2056         while ((entry != tx_q->cur_tx) && (count < budget)) {
2057                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
2058                 struct dma_desc *p;
2059                 int status;
2060
2061                 if (priv->extend_desc)
2062                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2063                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2064                         p = &tx_q->dma_entx[entry].basic;
2065                 else
2066                         p = tx_q->dma_tx + entry;
2067
2068                 status = stmmac_tx_status(priv, &priv->dev->stats,
2069                                 &priv->xstats, p, priv->ioaddr);
2070                 /* Check if the descriptor is owned by the DMA */
2071                 if (unlikely(status & tx_dma_own))
2072                         break;
2073
2074                 count++;
2075
2076                 /* Make sure descriptor fields are read after reading
2077                  * the own bit.
2078                  */
2079                 dma_rmb();
2080
2081                 /* Just consider the last segment and ...*/
2082                 if (likely(!(status & tx_not_ls))) {
2083                         /* ... verify the status error condition */
2084                         if (unlikely(status & tx_err)) {
2085                                 priv->dev->stats.tx_errors++;
2086                         } else {
2087                                 priv->dev->stats.tx_packets++;
2088                                 priv->xstats.tx_pkt_n++;
2089                         }
2090                         stmmac_get_tx_hwtstamp(priv, p, skb);
2091                 }
2092
2093                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2094                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2095                                 dma_unmap_page(priv->device,
2096                                                tx_q->tx_skbuff_dma[entry].buf,
2097                                                tx_q->tx_skbuff_dma[entry].len,
2098                                                DMA_TO_DEVICE);
2099                         else
2100                                 dma_unmap_single(priv->device,
2101                                                  tx_q->tx_skbuff_dma[entry].buf,
2102                                                  tx_q->tx_skbuff_dma[entry].len,
2103                                                  DMA_TO_DEVICE);
2104                         tx_q->tx_skbuff_dma[entry].buf = 0;
2105                         tx_q->tx_skbuff_dma[entry].len = 0;
2106                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2107                 }
2108
2109                 stmmac_clean_desc3(priv, tx_q, p);
2110
2111                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2112                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2113
2114                 if (likely(skb != NULL)) {
2115                         pkts_compl++;
2116                         bytes_compl += skb->len;
2117                         dev_consume_skb_any(skb);
2118                         tx_q->tx_skbuff[entry] = NULL;
2119                 }
2120
2121                 stmmac_release_tx_desc(priv, p, priv->mode);
2122
2123                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2124         }
2125         tx_q->dirty_tx = entry;
2126
2127         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2128                                   pkts_compl, bytes_compl);
2129
2130         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2131                                                                 queue))) &&
2132             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2133
2134                 netif_dbg(priv, tx_done, priv->dev,
2135                           "%s: restart transmit\n", __func__);
2136                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2137         }
2138
2139         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2140                 stmmac_enable_eee_mode(priv);
2141                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2142         }
2143
2144         /* We still have pending packets, let's call for a new scheduling */
2145         if (tx_q->dirty_tx != tx_q->cur_tx)
2146                 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2147
2148         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2149
2150         return count;
2151 }
2152
2153 /**
2154  * stmmac_tx_err - to manage the tx error
2155  * @priv: driver private structure
2156  * @chan: channel index
2157  * Description: it cleans the descriptors and restarts the transmission
2158  * in case of transmission errors.
2159  */
2160 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2161 {
2162         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2163
2164         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2165
2166         stmmac_stop_tx_dma(priv, chan);
2167         dma_free_tx_skbufs(priv, chan);
2168         stmmac_clear_tx_descriptors(priv, chan);
2169         tx_q->dirty_tx = 0;
2170         tx_q->cur_tx = 0;
2171         tx_q->mss = 0;
2172         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2173         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2174                             tx_q->dma_tx_phy, chan);
2175         stmmac_start_tx_dma(priv, chan);
2176
2177         priv->dev->stats.tx_errors++;
2178         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2179 }
2180
2181 /**
2182  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2183  *  @priv: driver private structure
2184  *  @txmode: TX operating mode
2185  *  @rxmode: RX operating mode
2186  *  @chan: channel index
2187  *  Description: it is used for configuring of the DMA operation mode in
2188  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2189  *  mode.
2190  */
2191 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2192                                           u32 rxmode, u32 chan)
2193 {
2194         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2195         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2196         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2197         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2198         int rxfifosz = priv->plat->rx_fifo_size;
2199         int txfifosz = priv->plat->tx_fifo_size;
2200
2201         if (rxfifosz == 0)
2202                 rxfifosz = priv->dma_cap.rx_fifo_size;
2203         if (txfifosz == 0)
2204                 txfifosz = priv->dma_cap.tx_fifo_size;
2205
2206         /* Adjust for real per queue fifo size */
2207         rxfifosz /= rx_channels_count;
2208         txfifosz /= tx_channels_count;
2209
2210         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2211         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2212 }
2213
2214 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2215 {
2216         int ret;
2217
2218         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2219                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2220         if (ret && (ret != -EINVAL)) {
2221                 stmmac_global_err(priv);
2222                 return true;
2223         }
2224
2225         return false;
2226 }
2227
2228 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2229 {
2230         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2231                                                  &priv->xstats, chan);
2232         struct stmmac_channel *ch = &priv->channel[chan];
2233         unsigned long flags;
2234
2235         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2236                 if (napi_schedule_prep(&ch->rx_napi)) {
2237                         spin_lock_irqsave(&ch->lock, flags);
2238                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2239                         spin_unlock_irqrestore(&ch->lock, flags);
2240                         __napi_schedule(&ch->rx_napi);
2241                 }
2242         }
2243
2244         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2245                 if (napi_schedule_prep(&ch->tx_napi)) {
2246                         spin_lock_irqsave(&ch->lock, flags);
2247                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2248                         spin_unlock_irqrestore(&ch->lock, flags);
2249                         __napi_schedule(&ch->tx_napi);
2250                 }
2251         }
2252
2253         return status;
2254 }
2255
2256 /**
2257  * stmmac_dma_interrupt - DMA ISR
2258  * @priv: driver private structure
2259  * Description: this is the DMA ISR. It is called by the main ISR.
2260  * It calls the dwmac dma routine and schedule poll method in case of some
2261  * work can be done.
2262  */
2263 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2264 {
2265         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2266         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2267         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2268                                 tx_channel_count : rx_channel_count;
2269         u32 chan;
2270         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2271
2272         /* Make sure we never check beyond our status buffer. */
2273         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2274                 channels_to_check = ARRAY_SIZE(status);
2275
2276         for (chan = 0; chan < channels_to_check; chan++)
2277                 status[chan] = stmmac_napi_check(priv, chan);
2278
2279         for (chan = 0; chan < tx_channel_count; chan++) {
2280                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2281                         /* Try to bump up the dma threshold on this failure */
2282                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2283                             (tc <= 256)) {
2284                                 tc += 64;
2285                                 if (priv->plat->force_thresh_dma_mode)
2286                                         stmmac_set_dma_operation_mode(priv,
2287                                                                       tc,
2288                                                                       tc,
2289                                                                       chan);
2290                                 else
2291                                         stmmac_set_dma_operation_mode(priv,
2292                                                                     tc,
2293                                                                     SF_DMA_MODE,
2294                                                                     chan);
2295                                 priv->xstats.threshold = tc;
2296                         }
2297                 } else if (unlikely(status[chan] == tx_hard_error)) {
2298                         stmmac_tx_err(priv, chan);
2299                 }
2300         }
2301 }
2302
2303 /**
2304  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2305  * @priv: driver private structure
2306  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2307  */
2308 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2309 {
2310         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2311                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2312
2313         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2314
2315         if (priv->dma_cap.rmon) {
2316                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2317                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2318         } else
2319                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2320 }
2321
2322 /**
2323  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2324  * @priv: driver private structure
2325  * Description:
2326  *  new GMAC chip generations have a new register to indicate the
2327  *  presence of the optional feature/functions.
2328  *  This can be also used to override the value passed through the
2329  *  platform and necessary for old MAC10/100 and GMAC chips.
2330  */
2331 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2332 {
2333         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2334 }
2335
2336 /**
2337  * stmmac_check_ether_addr - check if the MAC addr is valid
2338  * @priv: driver private structure
2339  * Description:
2340  * it is to verify if the MAC address is valid, in case of failures it
2341  * generates a random MAC address
2342  */
2343 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2344 {
2345         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2346                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2347                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2348                         eth_hw_addr_random(priv->dev);
2349                 dev_info(priv->device, "device MAC address %pM\n",
2350                          priv->dev->dev_addr);
2351         }
2352 }
2353
2354 /**
2355  * stmmac_init_dma_engine - DMA init.
2356  * @priv: driver private structure
2357  * Description:
2358  * It inits the DMA invoking the specific MAC/GMAC callback.
2359  * Some DMA parameters can be passed from the platform;
2360  * in case of these are not passed a default is kept for the MAC or GMAC.
2361  */
2362 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2363 {
2364         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2365         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2366         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2367         struct stmmac_rx_queue *rx_q;
2368         struct stmmac_tx_queue *tx_q;
2369         u32 chan = 0;
2370         int atds = 0;
2371         int ret = 0;
2372
2373         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2374                 dev_err(priv->device, "Invalid DMA configuration\n");
2375                 return -EINVAL;
2376         }
2377
2378         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2379                 atds = 1;
2380
2381         ret = stmmac_reset(priv, priv->ioaddr);
2382         if (ret) {
2383                 dev_err(priv->device, "Failed to reset the dma\n");
2384                 return ret;
2385         }
2386
2387         /* DMA Configuration */
2388         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2389
2390         if (priv->plat->axi)
2391                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2392
2393         /* DMA CSR Channel configuration */
2394         for (chan = 0; chan < dma_csr_ch; chan++)
2395                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2396
2397         /* DMA RX Channel Configuration */
2398         for (chan = 0; chan < rx_channels_count; chan++) {
2399                 rx_q = &priv->rx_queue[chan];
2400
2401                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2402                                     rx_q->dma_rx_phy, chan);
2403
2404                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2405                                      (priv->dma_rx_size *
2406                                       sizeof(struct dma_desc));
2407                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2408                                        rx_q->rx_tail_addr, chan);
2409         }
2410
2411         /* DMA TX Channel Configuration */
2412         for (chan = 0; chan < tx_channels_count; chan++) {
2413                 tx_q = &priv->tx_queue[chan];
2414
2415                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2416                                     tx_q->dma_tx_phy, chan);
2417
2418                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2419                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2420                                        tx_q->tx_tail_addr, chan);
2421         }
2422
2423         return ret;
2424 }
2425
2426 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2427 {
2428         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2429
2430         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2431 }
2432
2433 /**
2434  * stmmac_tx_timer - mitigation sw timer for tx.
2435  * @t: data pointer
2436  * Description:
2437  * This is the timer handler to directly invoke the stmmac_tx_clean.
2438  */
2439 static void stmmac_tx_timer(struct timer_list *t)
2440 {
2441         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2442         struct stmmac_priv *priv = tx_q->priv_data;
2443         struct stmmac_channel *ch;
2444
2445         ch = &priv->channel[tx_q->queue_index];
2446
2447         if (likely(napi_schedule_prep(&ch->tx_napi))) {
2448                 unsigned long flags;
2449
2450                 spin_lock_irqsave(&ch->lock, flags);
2451                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2452                 spin_unlock_irqrestore(&ch->lock, flags);
2453                 __napi_schedule(&ch->tx_napi);
2454         }
2455 }
2456
2457 /**
2458  * stmmac_init_coalesce - init mitigation options.
2459  * @priv: driver private structure
2460  * Description:
2461  * This inits the coalesce parameters: i.e. timer rate,
2462  * timer handler and default threshold used for enabling the
2463  * interrupt on completion bit.
2464  */
2465 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2466 {
2467         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2468         u32 chan;
2469
2470         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2471         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2472         priv->rx_coal_frames = STMMAC_RX_FRAMES;
2473
2474         for (chan = 0; chan < tx_channel_count; chan++) {
2475                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2476
2477                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2478         }
2479 }
2480
2481 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2482 {
2483         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2484         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2485         u32 chan;
2486
2487         /* set TX ring length */
2488         for (chan = 0; chan < tx_channels_count; chan++)
2489                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2490                                        (priv->dma_tx_size - 1), chan);
2491
2492         /* set RX ring length */
2493         for (chan = 0; chan < rx_channels_count; chan++)
2494                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2495                                        (priv->dma_rx_size - 1), chan);
2496 }
2497
2498 /**
2499  *  stmmac_set_tx_queue_weight - Set TX queue weight
2500  *  @priv: driver private structure
2501  *  Description: It is used for setting TX queues weight
2502  */
2503 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2504 {
2505         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2506         u32 weight;
2507         u32 queue;
2508
2509         for (queue = 0; queue < tx_queues_count; queue++) {
2510                 weight = priv->plat->tx_queues_cfg[queue].weight;
2511                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2512         }
2513 }
2514
2515 /**
2516  *  stmmac_configure_cbs - Configure CBS in TX queue
2517  *  @priv: driver private structure
2518  *  Description: It is used for configuring CBS in AVB TX queues
2519  */
2520 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2521 {
2522         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2523         u32 mode_to_use;
2524         u32 queue;
2525
2526         /* queue 0 is reserved for legacy traffic */
2527         for (queue = 1; queue < tx_queues_count; queue++) {
2528                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2529                 if (mode_to_use == MTL_QUEUE_DCB)
2530                         continue;
2531
2532                 stmmac_config_cbs(priv, priv->hw,
2533                                 priv->plat->tx_queues_cfg[queue].send_slope,
2534                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2535                                 priv->plat->tx_queues_cfg[queue].high_credit,
2536                                 priv->plat->tx_queues_cfg[queue].low_credit,
2537                                 queue);
2538         }
2539 }
2540
2541 /**
2542  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2543  *  @priv: driver private structure
2544  *  Description: It is used for mapping RX queues to RX dma channels
2545  */
2546 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2547 {
2548         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2549         u32 queue;
2550         u32 chan;
2551
2552         for (queue = 0; queue < rx_queues_count; queue++) {
2553                 chan = priv->plat->rx_queues_cfg[queue].chan;
2554                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2555         }
2556 }
2557
2558 /**
2559  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2560  *  @priv: driver private structure
2561  *  Description: It is used for configuring the RX Queue Priority
2562  */
2563 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2564 {
2565         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2566         u32 queue;
2567         u32 prio;
2568
2569         for (queue = 0; queue < rx_queues_count; queue++) {
2570                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2571                         continue;
2572
2573                 prio = priv->plat->rx_queues_cfg[queue].prio;
2574                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2575         }
2576 }
2577
2578 /**
2579  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2580  *  @priv: driver private structure
2581  *  Description: It is used for configuring the TX Queue Priority
2582  */
2583 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2584 {
2585         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2586         u32 queue;
2587         u32 prio;
2588
2589         for (queue = 0; queue < tx_queues_count; queue++) {
2590                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2591                         continue;
2592
2593                 prio = priv->plat->tx_queues_cfg[queue].prio;
2594                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2595         }
2596 }
2597
2598 /**
2599  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2600  *  @priv: driver private structure
2601  *  Description: It is used for configuring the RX queue routing
2602  */
2603 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2604 {
2605         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2606         u32 queue;
2607         u8 packet;
2608
2609         for (queue = 0; queue < rx_queues_count; queue++) {
2610                 /* no specific packet type routing specified for the queue */
2611                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2612                         continue;
2613
2614                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2615                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2616         }
2617 }
2618
2619 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2620 {
2621         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2622                 priv->rss.enable = false;
2623                 return;
2624         }
2625
2626         if (priv->dev->features & NETIF_F_RXHASH)
2627                 priv->rss.enable = true;
2628         else
2629                 priv->rss.enable = false;
2630
2631         stmmac_rss_configure(priv, priv->hw, &priv->rss,
2632                              priv->plat->rx_queues_to_use);
2633 }
2634
2635 /**
2636  *  stmmac_mtl_configuration - Configure MTL
2637  *  @priv: driver private structure
2638  *  Description: It is used for configurring MTL
2639  */
2640 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2641 {
2642         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2643         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2644
2645         if (tx_queues_count > 1)
2646                 stmmac_set_tx_queue_weight(priv);
2647
2648         /* Configure MTL RX algorithms */
2649         if (rx_queues_count > 1)
2650                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2651                                 priv->plat->rx_sched_algorithm);
2652
2653         /* Configure MTL TX algorithms */
2654         if (tx_queues_count > 1)
2655                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2656                                 priv->plat->tx_sched_algorithm);
2657
2658         /* Configure CBS in AVB TX queues */
2659         if (tx_queues_count > 1)
2660                 stmmac_configure_cbs(priv);
2661
2662         /* Map RX MTL to DMA channels */
2663         stmmac_rx_queue_dma_chan_map(priv);
2664
2665         /* Enable MAC RX Queues */
2666         stmmac_mac_enable_rx_queues(priv);
2667
2668         /* Set RX priorities */
2669         if (rx_queues_count > 1)
2670                 stmmac_mac_config_rx_queues_prio(priv);
2671
2672         /* Set TX priorities */
2673         if (tx_queues_count > 1)
2674                 stmmac_mac_config_tx_queues_prio(priv);
2675
2676         /* Set RX routing */
2677         if (rx_queues_count > 1)
2678                 stmmac_mac_config_rx_queues_routing(priv);
2679
2680         /* Receive Side Scaling */
2681         if (rx_queues_count > 1)
2682                 stmmac_mac_config_rss(priv);
2683 }
2684
2685 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2686 {
2687         if (priv->dma_cap.asp) {
2688                 netdev_info(priv->dev, "Enabling Safety Features\n");
2689                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2690         } else {
2691                 netdev_info(priv->dev, "No Safety Features support found\n");
2692         }
2693 }
2694
2695 /**
2696  * stmmac_hw_setup - setup mac in a usable state.
2697  *  @dev : pointer to the device structure.
2698  *  @ptp_register: register PTP if set
2699  *  Description:
2700  *  this is the main function to setup the HW in a usable state because the
2701  *  dma engine is reset, the core registers are configured (e.g. AXI,
2702  *  Checksum features, timers). The DMA is ready to start receiving and
2703  *  transmitting.
2704  *  Return value:
2705  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2706  *  file on failure.
2707  */
2708 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
2709 {
2710         struct stmmac_priv *priv = netdev_priv(dev);
2711         u32 rx_cnt = priv->plat->rx_queues_to_use;
2712         u32 tx_cnt = priv->plat->tx_queues_to_use;
2713         u32 chan;
2714         int ret;
2715
2716         /* DMA initialization and SW reset */
2717         ret = stmmac_init_dma_engine(priv);
2718         if (ret < 0) {
2719                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2720                            __func__);
2721                 return ret;
2722         }
2723
2724         /* Copy the MAC addr into the HW  */
2725         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2726
2727         /* PS and related bits will be programmed according to the speed */
2728         if (priv->hw->pcs) {
2729                 int speed = priv->plat->mac_port_sel_speed;
2730
2731                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2732                     (speed == SPEED_1000)) {
2733                         priv->hw->ps = speed;
2734                 } else {
2735                         dev_warn(priv->device, "invalid port speed\n");
2736                         priv->hw->ps = 0;
2737                 }
2738         }
2739
2740         /* Initialize the MAC Core */
2741         stmmac_core_init(priv, priv->hw, dev);
2742
2743         /* Initialize MTL*/
2744         stmmac_mtl_configuration(priv);
2745
2746         /* Initialize Safety Features */
2747         stmmac_safety_feat_configuration(priv);
2748
2749         ret = stmmac_rx_ipc(priv, priv->hw);
2750         if (!ret) {
2751                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2752                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2753                 priv->hw->rx_csum = 0;
2754         }
2755
2756         /* Enable the MAC Rx/Tx */
2757         stmmac_mac_set(priv, priv->ioaddr, true);
2758
2759         /* Set the HW DMA mode and the COE */
2760         stmmac_dma_operation_mode(priv);
2761
2762         stmmac_mmc_setup(priv);
2763
2764         if (ptp_register) {
2765                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2766                 if (ret < 0)
2767                         netdev_warn(priv->dev,
2768                                     "failed to enable PTP reference clock: %pe\n",
2769                                     ERR_PTR(ret));
2770         }
2771
2772         ret = stmmac_init_ptp(priv);
2773         if (ret == -EOPNOTSUPP)
2774                 netdev_warn(priv->dev, "PTP not supported by HW\n");
2775         else if (ret)
2776                 netdev_warn(priv->dev, "PTP init failed\n");
2777         else if (ptp_register)
2778                 stmmac_ptp_register(priv);
2779
2780         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2781
2782         /* Convert the timer from msec to usec */
2783         if (!priv->tx_lpi_timer)
2784                 priv->tx_lpi_timer = eee_timer * 1000;
2785
2786         if (priv->use_riwt) {
2787                 if (!priv->rx_riwt)
2788                         priv->rx_riwt = DEF_DMA_RIWT;
2789
2790                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2791         }
2792
2793         if (priv->hw->pcs)
2794                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2795
2796         /* set TX and RX rings length */
2797         stmmac_set_rings_length(priv);
2798
2799         /* Enable TSO */
2800         if (priv->tso) {
2801                 for (chan = 0; chan < tx_cnt; chan++) {
2802                         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2803
2804                         /* TSO and TBS cannot co-exist */
2805                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
2806                                 continue;
2807
2808                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2809                 }
2810         }
2811
2812         /* Enable Split Header */
2813         if (priv->sph && priv->hw->rx_csum) {
2814                 for (chan = 0; chan < rx_cnt; chan++)
2815                         stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2816         }
2817
2818         /* VLAN Tag Insertion */
2819         if (priv->dma_cap.vlins)
2820                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2821
2822         /* TBS */
2823         for (chan = 0; chan < tx_cnt; chan++) {
2824                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2825                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2826
2827                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2828         }
2829
2830         /* Configure real RX and TX queues */
2831         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2832         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2833
2834         /* Start the ball rolling... */
2835         stmmac_start_all_dma(priv);
2836
2837         return 0;
2838 }
2839
2840 static void stmmac_hw_teardown(struct net_device *dev)
2841 {
2842         struct stmmac_priv *priv = netdev_priv(dev);
2843
2844         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2845 }
2846
2847 /**
2848  *  stmmac_open - open entry point of the driver
2849  *  @dev : pointer to the device structure.
2850  *  Description:
2851  *  This function is the open entry point of the driver.
2852  *  Return value:
2853  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2854  *  file on failure.
2855  */
2856 static int stmmac_open(struct net_device *dev)
2857 {
2858         struct stmmac_priv *priv = netdev_priv(dev);
2859         int bfsize = 0;
2860         u32 chan;
2861         int ret;
2862
2863         ret = pm_runtime_get_sync(priv->device);
2864         if (ret < 0) {
2865                 pm_runtime_put_noidle(priv->device);
2866                 return ret;
2867         }
2868
2869         if (priv->hw->pcs != STMMAC_PCS_TBI &&
2870             priv->hw->pcs != STMMAC_PCS_RTBI &&
2871             priv->hw->xpcs == NULL) {
2872                 ret = stmmac_init_phy(dev);
2873                 if (ret) {
2874                         netdev_err(priv->dev,
2875                                    "%s: Cannot attach to PHY (error: %d)\n",
2876                                    __func__, ret);
2877                         goto init_phy_error;
2878                 }
2879         }
2880
2881         /* Extra statistics */
2882         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2883         priv->xstats.threshold = tc;
2884
2885         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2886         if (bfsize < 0)
2887                 bfsize = 0;
2888
2889         if (bfsize < BUF_SIZE_16KiB)
2890                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2891
2892         priv->dma_buf_sz = bfsize;
2893         buf_sz = bfsize;
2894
2895         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2896
2897         if (!priv->dma_tx_size)
2898                 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
2899         if (!priv->dma_rx_size)
2900                 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
2901
2902         /* Earlier check for TBS */
2903         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2904                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2905                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2906
2907                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
2908                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2909         }
2910
2911         ret = alloc_dma_desc_resources(priv);
2912         if (ret < 0) {
2913                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2914                            __func__);
2915                 goto dma_desc_error;
2916         }
2917
2918         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2919         if (ret < 0) {
2920                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2921                            __func__);
2922                 goto init_error;
2923         }
2924
2925         if (priv->plat->serdes_powerup) {
2926                 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
2927                 if (ret < 0) {
2928                         netdev_err(priv->dev, "%s: Serdes powerup failed\n",
2929                                    __func__);
2930                         goto init_error;
2931                 }
2932         }
2933
2934         ret = stmmac_hw_setup(dev, true);
2935         if (ret < 0) {
2936                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2937                 goto init_error;
2938         }
2939
2940         stmmac_init_coalesce(priv);
2941
2942         phylink_start(priv->phylink);
2943         /* We may have called phylink_speed_down before */
2944         phylink_speed_up(priv->phylink);
2945
2946         /* Request the IRQ lines */
2947         ret = request_irq(dev->irq, stmmac_interrupt,
2948                           IRQF_SHARED, dev->name, dev);
2949         if (unlikely(ret < 0)) {
2950                 netdev_err(priv->dev,
2951                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2952                            __func__, dev->irq, ret);
2953                 goto irq_error;
2954         }
2955
2956         /* Request the Wake IRQ in case of another line is used for WoL */
2957         if (priv->wol_irq != dev->irq) {
2958                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2959                                   IRQF_SHARED, dev->name, dev);
2960                 if (unlikely(ret < 0)) {
2961                         netdev_err(priv->dev,
2962                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2963                                    __func__, priv->wol_irq, ret);
2964                         goto wolirq_error;
2965                 }
2966         }
2967
2968         /* Request the IRQ lines */
2969         if (priv->lpi_irq > 0) {
2970                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2971                                   dev->name, dev);
2972                 if (unlikely(ret < 0)) {
2973                         netdev_err(priv->dev,
2974                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2975                                    __func__, priv->lpi_irq, ret);
2976                         goto lpiirq_error;
2977                 }
2978         }
2979
2980         stmmac_enable_all_queues(priv);
2981         netif_tx_start_all_queues(priv->dev);
2982
2983         return 0;
2984
2985 lpiirq_error:
2986         if (priv->wol_irq != dev->irq)
2987                 free_irq(priv->wol_irq, dev);
2988 wolirq_error:
2989         free_irq(dev->irq, dev);
2990 irq_error:
2991         phylink_stop(priv->phylink);
2992
2993         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2994                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2995
2996         stmmac_hw_teardown(dev);
2997 init_error:
2998         free_dma_desc_resources(priv);
2999 dma_desc_error:
3000         phylink_disconnect_phy(priv->phylink);
3001 init_phy_error:
3002         pm_runtime_put(priv->device);
3003         return ret;
3004 }
3005
3006 /**
3007  *  stmmac_release - close entry point of the driver
3008  *  @dev : device pointer.
3009  *  Description:
3010  *  This is the stop entry point of the driver.
3011  */
3012 static int stmmac_release(struct net_device *dev)
3013 {
3014         struct stmmac_priv *priv = netdev_priv(dev);
3015         u32 chan;
3016
3017         if (device_may_wakeup(priv->device))
3018                 phylink_speed_down(priv->phylink, false);
3019         /* Stop and disconnect the PHY */
3020         phylink_stop(priv->phylink);
3021         phylink_disconnect_phy(priv->phylink);
3022
3023         stmmac_disable_all_queues(priv);
3024
3025         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3026                 del_timer_sync(&priv->tx_queue[chan].txtimer);
3027
3028         /* Free the IRQ lines */
3029         free_irq(dev->irq, dev);
3030         if (priv->wol_irq != dev->irq)
3031                 free_irq(priv->wol_irq, dev);
3032         if (priv->lpi_irq > 0)
3033                 free_irq(priv->lpi_irq, dev);
3034
3035         if (priv->eee_enabled) {
3036                 priv->tx_path_in_lpi_mode = false;
3037                 del_timer_sync(&priv->eee_ctrl_timer);
3038         }
3039
3040         /* Stop TX/RX DMA and clear the descriptors */
3041         stmmac_stop_all_dma(priv);
3042
3043         /* Release and free the Rx/Tx resources */
3044         free_dma_desc_resources(priv);
3045
3046         /* Disable the MAC Rx/Tx */
3047         stmmac_mac_set(priv, priv->ioaddr, false);
3048
3049         /* Powerdown Serdes if there is */
3050         if (priv->plat->serdes_powerdown)
3051                 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3052
3053         netif_carrier_off(dev);
3054
3055         stmmac_release_ptp(priv);
3056
3057         pm_runtime_put(priv->device);
3058
3059         return 0;
3060 }
3061
3062 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3063                                struct stmmac_tx_queue *tx_q)
3064 {
3065         u16 tag = 0x0, inner_tag = 0x0;
3066         u32 inner_type = 0x0;
3067         struct dma_desc *p;
3068
3069         if (!priv->dma_cap.vlins)
3070                 return false;
3071         if (!skb_vlan_tag_present(skb))
3072                 return false;
3073         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3074                 inner_tag = skb_vlan_tag_get(skb);
3075                 inner_type = STMMAC_VLAN_INSERT;
3076         }
3077
3078         tag = skb_vlan_tag_get(skb);
3079
3080         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3081                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3082         else
3083                 p = &tx_q->dma_tx[tx_q->cur_tx];
3084
3085         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3086                 return false;
3087
3088         stmmac_set_tx_owner(priv, p);
3089         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3090         return true;
3091 }
3092
3093 /**
3094  *  stmmac_tso_allocator - close entry point of the driver
3095  *  @priv: driver private structure
3096  *  @des: buffer start address
3097  *  @total_len: total length to fill in descriptors
3098  *  @last_segment: condition for the last descriptor
3099  *  @queue: TX queue index
3100  *  Description:
3101  *  This function fills descriptor and request new descriptors according to
3102  *  buffer length to fill
3103  */
3104 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3105                                  int total_len, bool last_segment, u32 queue)
3106 {
3107         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3108         struct dma_desc *desc;
3109         u32 buff_size;
3110         int tmp_len;
3111
3112         tmp_len = total_len;
3113
3114         while (tmp_len > 0) {
3115                 dma_addr_t curr_addr;
3116
3117                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3118                                                 priv->dma_tx_size);
3119                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3120
3121                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3122                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3123                 else
3124                         desc = &tx_q->dma_tx[tx_q->cur_tx];
3125
3126                 curr_addr = des + (total_len - tmp_len);
3127                 if (priv->dma_cap.addr64 <= 32)
3128                         desc->des0 = cpu_to_le32(curr_addr);
3129                 else
3130                         stmmac_set_desc_addr(priv, desc, curr_addr);
3131
3132                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3133                             TSO_MAX_BUFF_SIZE : tmp_len;
3134
3135                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3136                                 0, 1,
3137                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3138                                 0, 0);
3139
3140                 tmp_len -= TSO_MAX_BUFF_SIZE;
3141         }
3142 }
3143
3144 /**
3145  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3146  *  @skb : the socket buffer
3147  *  @dev : device pointer
3148  *  Description: this is the transmit function that is called on TSO frames
3149  *  (support available on GMAC4 and newer chips).
3150  *  Diagram below show the ring programming in case of TSO frames:
3151  *
3152  *  First Descriptor
3153  *   --------
3154  *   | DES0 |---> buffer1 = L2/L3/L4 header
3155  *   | DES1 |---> TCP Payload (can continue on next descr...)
3156  *   | DES2 |---> buffer 1 and 2 len
3157  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3158  *   --------
3159  *      |
3160  *     ...
3161  *      |
3162  *   --------
3163  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3164  *   | DES1 | --|
3165  *   | DES2 | --> buffer 1 and 2 len
3166  *   | DES3 |
3167  *   --------
3168  *
3169  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
3170  */
3171 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3172 {
3173         struct dma_desc *desc, *first, *mss_desc = NULL;
3174         struct stmmac_priv *priv = netdev_priv(dev);
3175         int desc_size, tmp_pay_len = 0, first_tx;
3176         int nfrags = skb_shinfo(skb)->nr_frags;
3177         u32 queue = skb_get_queue_mapping(skb);
3178         unsigned int first_entry, tx_packets;
3179         struct stmmac_tx_queue *tx_q;
3180         bool has_vlan, set_ic;
3181         u8 proto_hdr_len, hdr;
3182         u32 pay_len, mss;
3183         dma_addr_t des;
3184         int i;
3185
3186         tx_q = &priv->tx_queue[queue];
3187         first_tx = tx_q->cur_tx;
3188
3189         /* Compute header lengths */
3190         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3191                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3192                 hdr = sizeof(struct udphdr);
3193         } else {
3194                 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3195                 hdr = tcp_hdrlen(skb);
3196         }
3197
3198         /* Desc availability based on threshold should be enough safe */
3199         if (unlikely(stmmac_tx_avail(priv, queue) <
3200                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3201                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3202                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3203                                                                 queue));
3204                         /* This is a hard error, log it. */
3205                         netdev_err(priv->dev,
3206                                    "%s: Tx Ring full when queue awake\n",
3207                                    __func__);
3208                 }
3209                 return NETDEV_TX_BUSY;
3210         }
3211
3212         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3213
3214         mss = skb_shinfo(skb)->gso_size;
3215
3216         /* set new MSS value if needed */
3217         if (mss != tx_q->mss) {
3218                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3219                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3220                 else
3221                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3222
3223                 stmmac_set_mss(priv, mss_desc, mss);
3224                 tx_q->mss = mss;
3225                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3226                                                 priv->dma_tx_size);
3227                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3228         }
3229
3230         if (netif_msg_tx_queued(priv)) {
3231                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3232                         __func__, hdr, proto_hdr_len, pay_len, mss);
3233                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3234                         skb->data_len);
3235         }
3236
3237         /* Check if VLAN can be inserted by HW */
3238         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3239
3240         first_entry = tx_q->cur_tx;
3241         WARN_ON(tx_q->tx_skbuff[first_entry]);
3242
3243         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3244                 desc = &tx_q->dma_entx[first_entry].basic;
3245         else
3246                 desc = &tx_q->dma_tx[first_entry];
3247         first = desc;
3248
3249         if (has_vlan)
3250                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3251
3252         /* first descriptor: fill Headers on Buf1 */
3253         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3254                              DMA_TO_DEVICE);
3255         if (dma_mapping_error(priv->device, des))
3256                 goto dma_map_err;
3257
3258         tx_q->tx_skbuff_dma[first_entry].buf = des;
3259         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3260
3261         if (priv->dma_cap.addr64 <= 32) {
3262                 first->des0 = cpu_to_le32(des);
3263
3264                 /* Fill start of payload in buff2 of first descriptor */
3265                 if (pay_len)
3266                         first->des1 = cpu_to_le32(des + proto_hdr_len);
3267
3268                 /* If needed take extra descriptors to fill the remaining payload */
3269                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3270         } else {
3271                 stmmac_set_desc_addr(priv, first, des);
3272                 tmp_pay_len = pay_len;
3273                 des += proto_hdr_len;
3274                 pay_len = 0;
3275         }
3276
3277         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3278
3279         /* Prepare fragments */
3280         for (i = 0; i < nfrags; i++) {
3281                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3282
3283                 des = skb_frag_dma_map(priv->device, frag, 0,
3284                                        skb_frag_size(frag),
3285                                        DMA_TO_DEVICE);
3286                 if (dma_mapping_error(priv->device, des))
3287                         goto dma_map_err;
3288
3289                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3290                                      (i == nfrags - 1), queue);
3291
3292                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3293                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3294                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3295         }
3296
3297         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3298
3299         /* Only the last descriptor gets to point to the skb. */
3300         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3301
3302         /* Manage tx mitigation */
3303         tx_packets = (tx_q->cur_tx + 1) - first_tx;
3304         tx_q->tx_count_frames += tx_packets;
3305
3306         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3307                 set_ic = true;
3308         else if (!priv->tx_coal_frames)
3309                 set_ic = false;
3310         else if (tx_packets > priv->tx_coal_frames)
3311                 set_ic = true;
3312         else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3313                 set_ic = true;
3314         else
3315                 set_ic = false;
3316
3317         if (set_ic) {
3318                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3319                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3320                 else
3321                         desc = &tx_q->dma_tx[tx_q->cur_tx];
3322
3323                 tx_q->tx_count_frames = 0;
3324                 stmmac_set_tx_ic(priv, desc);
3325                 priv->xstats.tx_set_ic_bit++;
3326         }
3327
3328         /* We've used all descriptors we need for this skb, however,
3329          * advance cur_tx so that it references a fresh descriptor.
3330          * ndo_start_xmit will fill this descriptor the next time it's
3331          * called and stmmac_tx_clean may clean up to this descriptor.
3332          */
3333         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3334
3335         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3336                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3337                           __func__);
3338                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3339         }
3340
3341         dev->stats.tx_bytes += skb->len;
3342         priv->xstats.tx_tso_frames++;
3343         priv->xstats.tx_tso_nfrags += nfrags;
3344
3345         if (priv->sarc_type)
3346                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3347
3348         skb_tx_timestamp(skb);
3349
3350         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3351                      priv->hwts_tx_en)) {
3352                 /* declare that device is doing timestamping */
3353                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3354                 stmmac_enable_tx_timestamp(priv, first);
3355         }
3356
3357         /* Complete the first descriptor before granting the DMA */
3358         stmmac_prepare_tso_tx_desc(priv, first, 1,
3359                         proto_hdr_len,
3360                         pay_len,
3361                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3362                         hdr / 4, (skb->len - proto_hdr_len));
3363
3364         /* If context desc is used to change MSS */
3365         if (mss_desc) {
3366                 /* Make sure that first descriptor has been completely
3367                  * written, including its own bit. This is because MSS is
3368                  * actually before first descriptor, so we need to make
3369                  * sure that MSS's own bit is the last thing written.
3370                  */
3371                 dma_wmb();
3372                 stmmac_set_tx_owner(priv, mss_desc);
3373         }
3374
3375         /* The own bit must be the latest setting done when prepare the
3376          * descriptor and then barrier is needed to make sure that
3377          * all is coherent before granting the DMA engine.
3378          */
3379         wmb();
3380
3381         if (netif_msg_pktdata(priv)) {
3382                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3383                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3384                         tx_q->cur_tx, first, nfrags);
3385                 pr_info(">>> frame to be transmitted: ");
3386                 print_pkt(skb->data, skb_headlen(skb));
3387         }
3388
3389         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3390
3391         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3392                 desc_size = sizeof(struct dma_edesc);
3393         else
3394                 desc_size = sizeof(struct dma_desc);
3395
3396         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3397         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3398         stmmac_tx_timer_arm(priv, queue);
3399
3400         return NETDEV_TX_OK;
3401
3402 dma_map_err:
3403         dev_err(priv->device, "Tx dma map failed\n");
3404         dev_kfree_skb(skb);
3405         priv->dev->stats.tx_dropped++;
3406         return NETDEV_TX_OK;
3407 }
3408
3409 /**
3410  *  stmmac_xmit - Tx entry point of the driver
3411  *  @skb : the socket buffer
3412  *  @dev : device pointer
3413  *  Description : this is the tx entry point of the driver.
3414  *  It programs the chain or the ring and supports oversized frames
3415  *  and SG feature.
3416  */
3417 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3418 {
3419         unsigned int first_entry, tx_packets, enh_desc;
3420         struct stmmac_priv *priv = netdev_priv(dev);
3421         unsigned int nopaged_len = skb_headlen(skb);
3422         int i, csum_insertion = 0, is_jumbo = 0;
3423         u32 queue = skb_get_queue_mapping(skb);
3424         int nfrags = skb_shinfo(skb)->nr_frags;
3425         int gso = skb_shinfo(skb)->gso_type;
3426         struct dma_edesc *tbs_desc = NULL;
3427         int entry, desc_size, first_tx;
3428         struct dma_desc *desc, *first;
3429         struct stmmac_tx_queue *tx_q;
3430         bool has_vlan, set_ic;
3431         dma_addr_t des;
3432
3433         tx_q = &priv->tx_queue[queue];
3434         first_tx = tx_q->cur_tx;
3435
3436         if (priv->tx_path_in_lpi_mode)
3437                 stmmac_disable_eee_mode(priv);
3438
3439         /* Manage oversized TCP frames for GMAC4 device */
3440         if (skb_is_gso(skb) && priv->tso) {
3441                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3442                         return stmmac_tso_xmit(skb, dev);
3443                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3444                         return stmmac_tso_xmit(skb, dev);
3445         }
3446
3447         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3448                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3449                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3450                                                                 queue));
3451                         /* This is a hard error, log it. */
3452                         netdev_err(priv->dev,
3453                                    "%s: Tx Ring full when queue awake\n",
3454                                    __func__);
3455                 }
3456                 return NETDEV_TX_BUSY;
3457         }
3458
3459         /* Check if VLAN can be inserted by HW */
3460         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3461
3462         entry = tx_q->cur_tx;
3463         first_entry = entry;
3464         WARN_ON(tx_q->tx_skbuff[first_entry]);
3465
3466         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3467
3468         if (likely(priv->extend_desc))
3469                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3470         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3471                 desc = &tx_q->dma_entx[entry].basic;
3472         else
3473                 desc = tx_q->dma_tx + entry;
3474
3475         first = desc;
3476
3477         if (has_vlan)
3478                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3479
3480         enh_desc = priv->plat->enh_desc;
3481         /* To program the descriptors according to the size of the frame */
3482         if (enh_desc)
3483                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3484
3485         if (unlikely(is_jumbo)) {
3486                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3487                 if (unlikely(entry < 0) && (entry != -EINVAL))
3488                         goto dma_map_err;
3489         }
3490
3491         for (i = 0; i < nfrags; i++) {
3492                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3493                 int len = skb_frag_size(frag);
3494                 bool last_segment = (i == (nfrags - 1));
3495
3496                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3497                 WARN_ON(tx_q->tx_skbuff[entry]);
3498
3499                 if (likely(priv->extend_desc))
3500                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3501                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3502                         desc = &tx_q->dma_entx[entry].basic;
3503                 else
3504                         desc = tx_q->dma_tx + entry;
3505
3506                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3507                                        DMA_TO_DEVICE);
3508                 if (dma_mapping_error(priv->device, des))
3509                         goto dma_map_err; /* should reuse desc w/o issues */
3510
3511                 tx_q->tx_skbuff_dma[entry].buf = des;
3512
3513                 stmmac_set_desc_addr(priv, desc, des);
3514
3515                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3516                 tx_q->tx_skbuff_dma[entry].len = len;
3517                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3518
3519                 /* Prepare the descriptor and set the own bit too */
3520                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3521                                 priv->mode, 1, last_segment, skb->len);
3522         }
3523
3524         /* Only the last descriptor gets to point to the skb. */
3525         tx_q->tx_skbuff[entry] = skb;
3526
3527         /* According to the coalesce parameter the IC bit for the latest
3528          * segment is reset and the timer re-started to clean the tx status.
3529          * This approach takes care about the fragments: desc is the first
3530          * element in case of no SG.
3531          */
3532         tx_packets = (entry + 1) - first_tx;
3533         tx_q->tx_count_frames += tx_packets;
3534
3535         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3536                 set_ic = true;
3537         else if (!priv->tx_coal_frames)
3538                 set_ic = false;
3539         else if (tx_packets > priv->tx_coal_frames)
3540                 set_ic = true;
3541         else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3542                 set_ic = true;
3543         else
3544                 set_ic = false;
3545
3546         if (set_ic) {
3547                 if (likely(priv->extend_desc))
3548                         desc = &tx_q->dma_etx[entry].basic;
3549                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3550                         desc = &tx_q->dma_entx[entry].basic;
3551                 else
3552                         desc = &tx_q->dma_tx[entry];
3553
3554                 tx_q->tx_count_frames = 0;
3555                 stmmac_set_tx_ic(priv, desc);
3556                 priv->xstats.tx_set_ic_bit++;
3557         }
3558
3559         /* We've used all descriptors we need for this skb, however,
3560          * advance cur_tx so that it references a fresh descriptor.
3561          * ndo_start_xmit will fill this descriptor the next time it's
3562          * called and stmmac_tx_clean may clean up to this descriptor.
3563          */
3564         entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3565         tx_q->cur_tx = entry;
3566
3567         if (netif_msg_pktdata(priv)) {
3568                 netdev_dbg(priv->dev,
3569                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3570                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3571                            entry, first, nfrags);
3572
3573                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3574                 print_pkt(skb->data, skb->len);
3575         }
3576
3577         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3578                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3579                           __func__);
3580                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3581         }
3582
3583         dev->stats.tx_bytes += skb->len;
3584
3585         if (priv->sarc_type)
3586                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3587
3588         skb_tx_timestamp(skb);
3589
3590         /* Ready to fill the first descriptor and set the OWN bit w/o any
3591          * problems because all the descriptors are actually ready to be
3592          * passed to the DMA engine.
3593          */
3594         if (likely(!is_jumbo)) {
3595                 bool last_segment = (nfrags == 0);
3596
3597                 des = dma_map_single(priv->device, skb->data,
3598                                      nopaged_len, DMA_TO_DEVICE);
3599                 if (dma_mapping_error(priv->device, des))
3600                         goto dma_map_err;
3601
3602                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3603
3604                 stmmac_set_desc_addr(priv, first, des);
3605
3606                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3607                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3608
3609                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3610                              priv->hwts_tx_en)) {
3611                         /* declare that device is doing timestamping */
3612                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3613                         stmmac_enable_tx_timestamp(priv, first);
3614                 }
3615
3616                 /* Prepare the first descriptor setting the OWN bit too */
3617                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3618                                 csum_insertion, priv->mode, 0, last_segment,
3619                                 skb->len);
3620         }
3621
3622         if (tx_q->tbs & STMMAC_TBS_EN) {
3623                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3624
3625                 tbs_desc = &tx_q->dma_entx[first_entry];
3626                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3627         }
3628
3629         stmmac_set_tx_owner(priv, first);
3630
3631         /* The own bit must be the latest setting done when prepare the
3632          * descriptor and then barrier is needed to make sure that
3633          * all is coherent before granting the DMA engine.
3634          */
3635         wmb();
3636
3637         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3638
3639         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3640
3641         if (likely(priv->extend_desc))
3642                 desc_size = sizeof(struct dma_extended_desc);
3643         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3644                 desc_size = sizeof(struct dma_edesc);
3645         else
3646                 desc_size = sizeof(struct dma_desc);
3647
3648         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3649         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3650         stmmac_tx_timer_arm(priv, queue);
3651
3652         return NETDEV_TX_OK;
3653
3654 dma_map_err:
3655         netdev_err(priv->dev, "Tx DMA map failed\n");
3656         dev_kfree_skb(skb);
3657         priv->dev->stats.tx_dropped++;
3658         return NETDEV_TX_OK;
3659 }
3660
3661 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3662 {
3663         struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
3664         __be16 vlan_proto = veth->h_vlan_proto;
3665         u16 vlanid;
3666
3667         if ((vlan_proto == htons(ETH_P_8021Q) &&
3668              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3669             (vlan_proto == htons(ETH_P_8021AD) &&
3670              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3671                 /* pop the vlan tag */
3672                 vlanid = ntohs(veth->h_vlan_TCI);
3673                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3674                 skb_pull(skb, VLAN_HLEN);
3675                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3676         }
3677 }
3678
3679 /**
3680  * stmmac_rx_refill - refill used skb preallocated buffers
3681  * @priv: driver private structure
3682  * @queue: RX queue index
3683  * Description : this is to reallocate the skb for the reception process
3684  * that is based on zero-copy.
3685  */
3686 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3687 {
3688         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3689         int len, dirty = stmmac_rx_dirty(priv, queue);
3690         unsigned int entry = rx_q->dirty_rx;
3691
3692         len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3693
3694         while (dirty-- > 0) {
3695                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3696                 struct dma_desc *p;
3697                 bool use_rx_wd;
3698
3699                 if (priv->extend_desc)
3700                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3701                 else
3702                         p = rx_q->dma_rx + entry;
3703
3704                 if (!buf->page) {
3705                         buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
3706                         if (!buf->page)
3707                                 break;
3708                 }
3709
3710                 if (priv->sph && !buf->sec_page) {
3711                         buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
3712                         if (!buf->sec_page)
3713                                 break;
3714
3715                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3716
3717                         dma_sync_single_for_device(priv->device, buf->sec_addr,
3718                                                    len, DMA_FROM_DEVICE);
3719                 }
3720
3721                 buf->addr = page_pool_get_dma_addr(buf->page);
3722
3723                 /* Sync whole allocation to device. This will invalidate old
3724                  * data.
3725                  */
3726                 dma_sync_single_for_device(priv->device, buf->addr, len,
3727                                            DMA_FROM_DEVICE);
3728
3729                 stmmac_set_desc_addr(priv, p, buf->addr);
3730                 if (priv->sph)
3731                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
3732                 else
3733                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
3734                 stmmac_refill_desc3(priv, rx_q, p);
3735
3736                 rx_q->rx_count_frames++;
3737                 rx_q->rx_count_frames += priv->rx_coal_frames;
3738                 if (rx_q->rx_count_frames > priv->rx_coal_frames)
3739                         rx_q->rx_count_frames = 0;
3740
3741                 use_rx_wd = !priv->rx_coal_frames;
3742                 use_rx_wd |= rx_q->rx_count_frames > 0;
3743                 if (!priv->use_riwt)
3744                         use_rx_wd = false;
3745
3746                 dma_wmb();
3747                 stmmac_set_rx_owner(priv, p, use_rx_wd);
3748
3749                 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
3750         }
3751         rx_q->dirty_rx = entry;
3752         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3753                             (rx_q->dirty_rx * sizeof(struct dma_desc));
3754         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3755 }
3756
3757 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3758                                        struct dma_desc *p,
3759                                        int status, unsigned int len)
3760 {
3761         unsigned int plen = 0, hlen = 0;
3762         int coe = priv->hw->rx_csum;
3763
3764         /* Not first descriptor, buffer is always zero */
3765         if (priv->sph && len)
3766                 return 0;
3767
3768         /* First descriptor, get split header length */
3769         stmmac_get_rx_header_len(priv, p, &hlen);
3770         if (priv->sph && hlen) {
3771                 priv->xstats.rx_split_hdr_pkt_n++;
3772                 return hlen;
3773         }
3774
3775         /* First descriptor, not last descriptor and not split header */
3776         if (status & rx_not_ls)
3777                 return priv->dma_buf_sz;
3778
3779         plen = stmmac_get_rx_frame_len(priv, p, coe);
3780
3781         /* First descriptor and last descriptor and not split header */
3782         return min_t(unsigned int, priv->dma_buf_sz, plen);
3783 }
3784
3785 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3786                                        struct dma_desc *p,
3787                                        int status, unsigned int len)
3788 {
3789         int coe = priv->hw->rx_csum;
3790         unsigned int plen = 0;
3791
3792         /* Not split header, buffer is not available */
3793         if (!priv->sph)
3794                 return 0;
3795
3796         /* Not last descriptor */
3797         if (status & rx_not_ls)
3798                 return priv->dma_buf_sz;
3799
3800         plen = stmmac_get_rx_frame_len(priv, p, coe);
3801
3802         /* Last descriptor */
3803         return plen - len;
3804 }
3805
3806 /**
3807  * stmmac_rx - manage the receive process
3808  * @priv: driver private structure
3809  * @limit: napi bugget
3810  * @queue: RX queue index.
3811  * Description :  this the function called by the napi poll method.
3812  * It gets all the frames inside the ring.
3813  */
3814 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3815 {
3816         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3817         struct stmmac_channel *ch = &priv->channel[queue];
3818         unsigned int count = 0, error = 0, len = 0;
3819         int status = 0, coe = priv->hw->rx_csum;
3820         unsigned int next_entry = rx_q->cur_rx;
3821         unsigned int desc_size;
3822         struct sk_buff *skb = NULL;
3823
3824         if (netif_msg_rx_status(priv)) {
3825                 void *rx_head;
3826
3827                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3828                 if (priv->extend_desc) {
3829                         rx_head = (void *)rx_q->dma_erx;
3830                         desc_size = sizeof(struct dma_extended_desc);
3831                 } else {
3832                         rx_head = (void *)rx_q->dma_rx;
3833                         desc_size = sizeof(struct dma_desc);
3834                 }
3835
3836                 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3837                                     rx_q->dma_rx_phy, desc_size);
3838         }
3839         while (count < limit) {
3840                 unsigned int buf1_len = 0, buf2_len = 0;
3841                 enum pkt_hash_types hash_type;
3842                 struct stmmac_rx_buffer *buf;
3843                 struct dma_desc *np, *p;
3844                 int entry;
3845                 u32 hash;
3846
3847                 if (!count && rx_q->state_saved) {
3848                         skb = rx_q->state.skb;
3849                         error = rx_q->state.error;
3850                         len = rx_q->state.len;
3851                 } else {
3852                         rx_q->state_saved = false;
3853                         skb = NULL;
3854                         error = 0;
3855                         len = 0;
3856                 }
3857
3858 read_again:
3859                 if (count >= limit)
3860                         break;
3861
3862                 buf1_len = 0;
3863                 buf2_len = 0;
3864                 entry = next_entry;
3865                 buf = &rx_q->buf_pool[entry];
3866
3867                 if (priv->extend_desc)
3868                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3869                 else
3870                         p = rx_q->dma_rx + entry;
3871
3872                 /* read the status of the incoming frame */
3873                 status = stmmac_rx_status(priv, &priv->dev->stats,
3874                                 &priv->xstats, p);
3875                 /* check if managed by the DMA otherwise go ahead */
3876                 if (unlikely(status & dma_own))
3877                         break;
3878
3879                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3880                                                 priv->dma_rx_size);
3881                 next_entry = rx_q->cur_rx;
3882
3883                 if (priv->extend_desc)
3884                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3885                 else
3886                         np = rx_q->dma_rx + next_entry;
3887
3888                 prefetch(np);
3889
3890                 if (priv->extend_desc)
3891                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3892                                         &priv->xstats, rx_q->dma_erx + entry);
3893                 if (unlikely(status == discard_frame)) {
3894                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
3895                         buf->page = NULL;
3896                         error = 1;
3897                         if (!priv->hwts_rx_en)
3898                                 priv->dev->stats.rx_errors++;
3899                 }
3900
3901                 if (unlikely(error && (status & rx_not_ls)))
3902                         goto read_again;
3903                 if (unlikely(error)) {
3904                         dev_kfree_skb(skb);
3905                         skb = NULL;
3906                         count++;
3907                         continue;
3908                 }
3909
3910                 /* Buffer is good. Go on. */
3911
3912                 prefetch(page_address(buf->page));
3913                 if (buf->sec_page)
3914                         prefetch(page_address(buf->sec_page));
3915
3916                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3917                 len += buf1_len;
3918                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3919                 len += buf2_len;
3920
3921                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3922                  * Type frames (LLC/LLC-SNAP)
3923                  *
3924                  * llc_snap is never checked in GMAC >= 4, so this ACS
3925                  * feature is always disabled and packets need to be
3926                  * stripped manually.
3927                  */
3928                 if (likely(!(status & rx_not_ls)) &&
3929                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3930                      unlikely(status != llc_snap))) {
3931                         if (buf2_len)
3932                                 buf2_len -= ETH_FCS_LEN;
3933                         else
3934                                 buf1_len -= ETH_FCS_LEN;
3935
3936                         len -= ETH_FCS_LEN;
3937                 }
3938
3939                 if (!skb) {
3940                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3941                         if (!skb) {
3942                                 priv->dev->stats.rx_dropped++;
3943                                 count++;
3944                                 goto drain_data;
3945                         }
3946
3947                         dma_sync_single_for_cpu(priv->device, buf->addr,
3948                                                 buf1_len, DMA_FROM_DEVICE);
3949                         skb_copy_to_linear_data(skb, page_address(buf->page),
3950                                                 buf1_len);
3951                         skb_put(skb, buf1_len);
3952
3953                         /* Data payload copied into SKB, page ready for recycle */
3954                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
3955                         buf->page = NULL;
3956                 } else if (buf1_len) {
3957                         dma_sync_single_for_cpu(priv->device, buf->addr,
3958                                                 buf1_len, DMA_FROM_DEVICE);
3959                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3960                                         buf->page, 0, buf1_len,
3961                                         priv->dma_buf_sz);
3962
3963                         /* Data payload appended into SKB */
3964                         page_pool_release_page(rx_q->page_pool, buf->page);
3965                         buf->page = NULL;
3966                 }
3967
3968                 if (buf2_len) {
3969                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3970                                                 buf2_len, DMA_FROM_DEVICE);
3971                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3972                                         buf->sec_page, 0, buf2_len,
3973                                         priv->dma_buf_sz);
3974
3975                         /* Data payload appended into SKB */
3976                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
3977                         buf->sec_page = NULL;
3978                 }
3979
3980 drain_data:
3981                 if (likely(status & rx_not_ls))
3982                         goto read_again;
3983                 if (!skb)
3984                         continue;
3985
3986                 /* Got entire packet into SKB. Finish it. */
3987
3988                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3989                 stmmac_rx_vlan(priv->dev, skb);
3990                 skb->protocol = eth_type_trans(skb, priv->dev);
3991
3992                 if (unlikely(!coe))
3993                         skb_checksum_none_assert(skb);
3994                 else
3995                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3996
3997                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
3998                         skb_set_hash(skb, hash, hash_type);
3999
4000                 skb_record_rx_queue(skb, queue);
4001                 napi_gro_receive(&ch->rx_napi, skb);
4002                 skb = NULL;
4003
4004                 priv->dev->stats.rx_packets++;
4005                 priv->dev->stats.rx_bytes += len;
4006                 count++;
4007         }
4008
4009         if (status & rx_not_ls || skb) {
4010                 rx_q->state_saved = true;
4011                 rx_q->state.skb = skb;
4012                 rx_q->state.error = error;
4013                 rx_q->state.len = len;
4014         }
4015
4016         stmmac_rx_refill(priv, queue);
4017
4018         priv->xstats.rx_pkt_n += count;
4019
4020         return count;
4021 }
4022
4023 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
4024 {
4025         struct stmmac_channel *ch =
4026                 container_of(napi, struct stmmac_channel, rx_napi);
4027         struct stmmac_priv *priv = ch->priv_data;
4028         u32 chan = ch->index;
4029         int work_done;
4030
4031         priv->xstats.napi_poll++;
4032
4033         work_done = stmmac_rx(priv, budget, chan);
4034         if (work_done < budget && napi_complete_done(napi, work_done)) {
4035                 unsigned long flags;
4036
4037                 spin_lock_irqsave(&ch->lock, flags);
4038                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4039                 spin_unlock_irqrestore(&ch->lock, flags);
4040         }
4041
4042         return work_done;
4043 }
4044
4045 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
4046 {
4047         struct stmmac_channel *ch =
4048                 container_of(napi, struct stmmac_channel, tx_napi);
4049         struct stmmac_priv *priv = ch->priv_data;
4050         u32 chan = ch->index;
4051         int work_done;
4052
4053         priv->xstats.napi_poll++;
4054
4055         work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
4056         work_done = min(work_done, budget);
4057
4058         if (work_done < budget && napi_complete_done(napi, work_done)) {
4059                 unsigned long flags;
4060
4061                 spin_lock_irqsave(&ch->lock, flags);
4062                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4063                 spin_unlock_irqrestore(&ch->lock, flags);
4064         }
4065
4066         return work_done;
4067 }
4068
4069 /**
4070  *  stmmac_tx_timeout
4071  *  @dev : Pointer to net device structure
4072  *  @txqueue: the index of the hanging transmit queue
4073  *  Description: this function is called when a packet transmission fails to
4074  *   complete within a reasonable time. The driver will mark the error in the
4075  *   netdev structure and arrange for the device to be reset to a sane state
4076  *   in order to transmit a new packet.
4077  */
4078 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
4079 {
4080         struct stmmac_priv *priv = netdev_priv(dev);
4081
4082         stmmac_global_err(priv);
4083 }
4084
4085 /**
4086  *  stmmac_set_rx_mode - entry point for multicast addressing
4087  *  @dev : pointer to the device structure
4088  *  Description:
4089  *  This function is a driver entry point which gets called by the kernel
4090  *  whenever multicast addresses must be enabled/disabled.
4091  *  Return value:
4092  *  void.
4093  */
4094 static void stmmac_set_rx_mode(struct net_device *dev)
4095 {
4096         struct stmmac_priv *priv = netdev_priv(dev);
4097
4098         stmmac_set_filter(priv, priv->hw, dev);
4099 }
4100
4101 /**
4102  *  stmmac_change_mtu - entry point to change MTU size for the device.
4103  *  @dev : device pointer.
4104  *  @new_mtu : the new MTU size for the device.
4105  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
4106  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
4107  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
4108  *  Return value:
4109  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4110  *  file on failure.
4111  */
4112 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
4113 {
4114         struct stmmac_priv *priv = netdev_priv(dev);
4115         int txfifosz = priv->plat->tx_fifo_size;
4116         const int mtu = new_mtu;
4117
4118         if (txfifosz == 0)
4119                 txfifosz = priv->dma_cap.tx_fifo_size;
4120
4121         txfifosz /= priv->plat->tx_queues_to_use;
4122
4123         if (netif_running(dev)) {
4124                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
4125                 return -EBUSY;
4126         }
4127
4128         new_mtu = STMMAC_ALIGN(new_mtu);
4129
4130         /* If condition true, FIFO is too small or MTU too large */
4131         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
4132                 return -EINVAL;
4133
4134         dev->mtu = mtu;
4135
4136         netdev_update_features(dev);
4137
4138         return 0;
4139 }
4140
4141 static netdev_features_t stmmac_fix_features(struct net_device *dev,
4142                                              netdev_features_t features)
4143 {
4144         struct stmmac_priv *priv = netdev_priv(dev);
4145
4146         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
4147                 features &= ~NETIF_F_RXCSUM;
4148
4149         if (!priv->plat->tx_coe)
4150                 features &= ~NETIF_F_CSUM_MASK;
4151
4152         /* Some GMAC devices have a bugged Jumbo frame support that
4153          * needs to have the Tx COE disabled for oversized frames
4154          * (due to limited buffer sizes). In this case we disable
4155          * the TX csum insertion in the TDES and not use SF.
4156          */
4157         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4158                 features &= ~NETIF_F_CSUM_MASK;
4159
4160         /* Disable tso if asked by ethtool */
4161         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4162                 if (features & NETIF_F_TSO)
4163                         priv->tso = true;
4164                 else
4165                         priv->tso = false;
4166         }
4167
4168         return features;
4169 }
4170
4171 static int stmmac_set_features(struct net_device *netdev,
4172                                netdev_features_t features)
4173 {
4174         struct stmmac_priv *priv = netdev_priv(netdev);
4175         bool sph_en;
4176         u32 chan;
4177
4178         /* Keep the COE Type in case of csum is supporting */
4179         if (features & NETIF_F_RXCSUM)
4180                 priv->hw->rx_csum = priv->plat->rx_coe;
4181         else
4182                 priv->hw->rx_csum = 0;
4183         /* No check needed because rx_coe has been set before and it will be
4184          * fixed in case of issue.
4185          */
4186         stmmac_rx_ipc(priv, priv->hw);
4187
4188         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4189         for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4190                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
4191
4192         return 0;
4193 }
4194
4195 /**
4196  *  stmmac_interrupt - main ISR
4197  *  @irq: interrupt number.
4198  *  @dev_id: to pass the net device pointer (must be valid).
4199  *  Description: this is the main driver interrupt service routine.
4200  *  It can call:
4201  *  o DMA service routine (to manage incoming frame reception and transmission
4202  *    status)
4203  *  o Core interrupts to manage: remote wake-up, management counter, LPI
4204  *    interrupts.
4205  */
4206 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
4207 {
4208         struct net_device *dev = (struct net_device *)dev_id;
4209         struct stmmac_priv *priv = netdev_priv(dev);
4210         u32 rx_cnt = priv->plat->rx_queues_to_use;
4211         u32 tx_cnt = priv->plat->tx_queues_to_use;
4212         u32 queues_count;
4213         u32 queue;
4214         bool xmac;
4215
4216         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
4217         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
4218
4219         if (priv->irq_wake)
4220                 pm_wakeup_event(priv->device, 0);
4221
4222         /* Check if adapter is up */
4223         if (test_bit(STMMAC_DOWN, &priv->state))
4224                 return IRQ_HANDLED;
4225         /* Check if a fatal error happened */
4226         if (stmmac_safety_feat_interrupt(priv))
4227                 return IRQ_HANDLED;
4228
4229         /* To handle GMAC own interrupts */
4230         if ((priv->plat->has_gmac) || xmac) {
4231                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
4232
4233                 if (unlikely(status)) {
4234                         /* For LPI we need to save the tx status */
4235                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4236                                 priv->tx_path_in_lpi_mode = true;
4237                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4238                                 priv->tx_path_in_lpi_mode = false;
4239                 }
4240
4241                 for (queue = 0; queue < queues_count; queue++) {
4242                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
4243                                                             queue);
4244                 }
4245
4246                 /* PCS link status */
4247                 if (priv->hw->pcs) {
4248                         if (priv->xstats.pcs_link)
4249                                 netif_carrier_on(dev);
4250                         else
4251                                 netif_carrier_off(dev);
4252                 }
4253         }
4254
4255         /* To handle DMA interrupts */
4256         stmmac_dma_interrupt(priv);
4257
4258         return IRQ_HANDLED;
4259 }
4260
4261 #ifdef CONFIG_NET_POLL_CONTROLLER
4262 /* Polling receive - used by NETCONSOLE and other diagnostic tools
4263  * to allow network I/O with interrupts disabled.
4264  */
4265 static void stmmac_poll_controller(struct net_device *dev)
4266 {
4267         disable_irq(dev->irq);
4268         stmmac_interrupt(dev->irq, dev);
4269         enable_irq(dev->irq);
4270 }
4271 #endif
4272
4273 /**
4274  *  stmmac_ioctl - Entry point for the Ioctl
4275  *  @dev: Device pointer.
4276  *  @rq: An IOCTL specefic structure, that can contain a pointer to
4277  *  a proprietary structure used to pass information to the driver.
4278  *  @cmd: IOCTL command
4279  *  Description:
4280  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4281  */
4282 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4283 {
4284         struct stmmac_priv *priv = netdev_priv (dev);
4285         int ret = -EOPNOTSUPP;
4286
4287         if (!netif_running(dev))
4288                 return -EINVAL;
4289
4290         switch (cmd) {
4291         case SIOCGMIIPHY:
4292         case SIOCGMIIREG:
4293         case SIOCSMIIREG:
4294                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4295                 break;
4296         case SIOCSHWTSTAMP:
4297                 ret = stmmac_hwtstamp_set(dev, rq);
4298                 break;
4299         case SIOCGHWTSTAMP:
4300                 ret = stmmac_hwtstamp_get(dev, rq);
4301                 break;
4302         default:
4303                 break;
4304         }
4305
4306         return ret;
4307 }
4308
4309 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4310                                     void *cb_priv)
4311 {
4312         struct stmmac_priv *priv = cb_priv;
4313         int ret = -EOPNOTSUPP;
4314
4315         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4316                 return ret;
4317
4318         stmmac_disable_all_queues(priv);
4319
4320         switch (type) {
4321         case TC_SETUP_CLSU32:
4322                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4323                 break;
4324         case TC_SETUP_CLSFLOWER:
4325                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
4326                 break;
4327         default:
4328                 break;
4329         }
4330
4331         stmmac_enable_all_queues(priv);
4332         return ret;
4333 }
4334
4335 static LIST_HEAD(stmmac_block_cb_list);
4336
4337 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4338                            void *type_data)
4339 {
4340         struct stmmac_priv *priv = netdev_priv(ndev);
4341
4342         switch (type) {
4343         case TC_SETUP_BLOCK:
4344                 return flow_block_cb_setup_simple(type_data,
4345                                                   &stmmac_block_cb_list,
4346                                                   stmmac_setup_tc_block_cb,
4347                                                   priv, priv, true);
4348         case TC_SETUP_QDISC_CBS:
4349                 return stmmac_tc_setup_cbs(priv, priv, type_data);
4350         case TC_SETUP_QDISC_TAPRIO:
4351                 return stmmac_tc_setup_taprio(priv, priv, type_data);
4352         case TC_SETUP_QDISC_ETF:
4353                 return stmmac_tc_setup_etf(priv, priv, type_data);
4354         default:
4355                 return -EOPNOTSUPP;
4356         }
4357 }
4358
4359 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4360                                struct net_device *sb_dev)
4361 {
4362         int gso = skb_shinfo(skb)->gso_type;
4363
4364         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4365                 /*
4366                  * There is no way to determine the number of TSO/USO
4367                  * capable Queues. Let's use always the Queue 0
4368                  * because if TSO/USO is supported then at least this
4369                  * one will be capable.
4370                  */
4371                 return 0;
4372         }
4373
4374         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4375 }
4376
4377 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4378 {
4379         struct stmmac_priv *priv = netdev_priv(ndev);
4380         int ret = 0;
4381
4382         ret = pm_runtime_get_sync(priv->device);
4383         if (ret < 0) {
4384                 pm_runtime_put_noidle(priv->device);
4385                 return ret;
4386         }
4387
4388         ret = eth_mac_addr(ndev, addr);
4389         if (ret)
4390                 goto set_mac_error;
4391
4392         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4393
4394 set_mac_error:
4395         pm_runtime_put(priv->device);
4396
4397         return ret;
4398 }
4399
4400 #ifdef CONFIG_DEBUG_FS
4401 static struct dentry *stmmac_fs_dir;
4402
4403 static void sysfs_display_ring(void *head, int size, int extend_desc,
4404                                struct seq_file *seq, dma_addr_t dma_phy_addr)
4405 {
4406         int i;
4407         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4408         struct dma_desc *p = (struct dma_desc *)head;
4409         dma_addr_t dma_addr;
4410
4411         for (i = 0; i < size; i++) {
4412                 if (extend_desc) {
4413                         dma_addr = dma_phy_addr + i * sizeof(*ep);
4414                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4415                                    i, &dma_addr,
4416                                    le32_to_cpu(ep->basic.des0),
4417                                    le32_to_cpu(ep->basic.des1),
4418                                    le32_to_cpu(ep->basic.des2),
4419                                    le32_to_cpu(ep->basic.des3));
4420                         ep++;
4421                 } else {
4422                         dma_addr = dma_phy_addr + i * sizeof(*p);
4423                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4424                                    i, &dma_addr,
4425                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4426                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4427                         p++;
4428                 }
4429                 seq_printf(seq, "\n");
4430         }
4431 }
4432
4433 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4434 {
4435         struct net_device *dev = seq->private;
4436         struct stmmac_priv *priv = netdev_priv(dev);
4437         u32 rx_count = priv->plat->rx_queues_to_use;
4438         u32 tx_count = priv->plat->tx_queues_to_use;
4439         u32 queue;
4440
4441         if ((dev->flags & IFF_UP) == 0)
4442                 return 0;
4443
4444         for (queue = 0; queue < rx_count; queue++) {
4445                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4446
4447                 seq_printf(seq, "RX Queue %d:\n", queue);
4448
4449                 if (priv->extend_desc) {
4450                         seq_printf(seq, "Extended descriptor ring:\n");
4451                         sysfs_display_ring((void *)rx_q->dma_erx,
4452                                            priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
4453                 } else {
4454                         seq_printf(seq, "Descriptor ring:\n");
4455                         sysfs_display_ring((void *)rx_q->dma_rx,
4456                                            priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
4457                 }
4458         }
4459
4460         for (queue = 0; queue < tx_count; queue++) {
4461                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4462
4463                 seq_printf(seq, "TX Queue %d:\n", queue);
4464
4465                 if (priv->extend_desc) {
4466                         seq_printf(seq, "Extended descriptor ring:\n");
4467                         sysfs_display_ring((void *)tx_q->dma_etx,
4468                                            priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4469                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4470                         seq_printf(seq, "Descriptor ring:\n");
4471                         sysfs_display_ring((void *)tx_q->dma_tx,
4472                                            priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
4473                 }
4474         }
4475
4476         return 0;
4477 }
4478 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4479
4480 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4481 {
4482         struct net_device *dev = seq->private;
4483         struct stmmac_priv *priv = netdev_priv(dev);
4484
4485         if (!priv->hw_cap_support) {
4486                 seq_printf(seq, "DMA HW features not supported\n");
4487                 return 0;
4488         }
4489
4490         seq_printf(seq, "==============================\n");
4491         seq_printf(seq, "\tDMA HW features\n");
4492         seq_printf(seq, "==============================\n");
4493
4494         seq_printf(seq, "\t10/100 Mbps: %s\n",
4495                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4496         seq_printf(seq, "\t1000 Mbps: %s\n",
4497                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
4498         seq_printf(seq, "\tHalf duplex: %s\n",
4499                    (priv->dma_cap.half_duplex) ? "Y" : "N");
4500         seq_printf(seq, "\tHash Filter: %s\n",
4501                    (priv->dma_cap.hash_filter) ? "Y" : "N");
4502         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4503                    (priv->dma_cap.multi_addr) ? "Y" : "N");
4504         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4505                    (priv->dma_cap.pcs) ? "Y" : "N");
4506         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4507                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
4508         seq_printf(seq, "\tPMT Remote wake up: %s\n",
4509                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4510         seq_printf(seq, "\tPMT Magic Frame: %s\n",
4511                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4512         seq_printf(seq, "\tRMON module: %s\n",
4513                    (priv->dma_cap.rmon) ? "Y" : "N");
4514         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4515                    (priv->dma_cap.time_stamp) ? "Y" : "N");
4516         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4517                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
4518         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4519                    (priv->dma_cap.eee) ? "Y" : "N");
4520         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4521         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4522                    (priv->dma_cap.tx_coe) ? "Y" : "N");
4523         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4524                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4525                            (priv->dma_cap.rx_coe) ? "Y" : "N");
4526         } else {
4527                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4528                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4529                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4530                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4531         }
4532         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4533                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4534         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4535                    priv->dma_cap.number_rx_channel);
4536         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4537                    priv->dma_cap.number_tx_channel);
4538         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4539                    priv->dma_cap.number_rx_queues);
4540         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4541                    priv->dma_cap.number_tx_queues);
4542         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4543                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4544         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
4545         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
4546         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
4547         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
4548         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
4549                    priv->dma_cap.pps_out_num);
4550         seq_printf(seq, "\tSafety Features: %s\n",
4551                    priv->dma_cap.asp ? "Y" : "N");
4552         seq_printf(seq, "\tFlexible RX Parser: %s\n",
4553                    priv->dma_cap.frpsel ? "Y" : "N");
4554         seq_printf(seq, "\tEnhanced Addressing: %d\n",
4555                    priv->dma_cap.addr64);
4556         seq_printf(seq, "\tReceive Side Scaling: %s\n",
4557                    priv->dma_cap.rssen ? "Y" : "N");
4558         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
4559                    priv->dma_cap.vlhash ? "Y" : "N");
4560         seq_printf(seq, "\tSplit Header: %s\n",
4561                    priv->dma_cap.sphen ? "Y" : "N");
4562         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
4563                    priv->dma_cap.vlins ? "Y" : "N");
4564         seq_printf(seq, "\tDouble VLAN: %s\n",
4565                    priv->dma_cap.dvlan ? "Y" : "N");
4566         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
4567                    priv->dma_cap.l3l4fnum);
4568         seq_printf(seq, "\tARP Offloading: %s\n",
4569                    priv->dma_cap.arpoffsel ? "Y" : "N");
4570         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
4571                    priv->dma_cap.estsel ? "Y" : "N");
4572         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
4573                    priv->dma_cap.fpesel ? "Y" : "N");
4574         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
4575                    priv->dma_cap.tbssel ? "Y" : "N");
4576         return 0;
4577 }
4578 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4579
4580 /* Use network device events to rename debugfs file entries.
4581  */
4582 static int stmmac_device_event(struct notifier_block *unused,
4583                                unsigned long event, void *ptr)
4584 {
4585         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4586         struct stmmac_priv *priv = netdev_priv(dev);
4587
4588         if (dev->netdev_ops != &stmmac_netdev_ops)
4589                 goto done;
4590
4591         switch (event) {
4592         case NETDEV_CHANGENAME:
4593                 if (priv->dbgfs_dir)
4594                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4595                                                          priv->dbgfs_dir,
4596                                                          stmmac_fs_dir,
4597                                                          dev->name);
4598                 break;
4599         }
4600 done:
4601         return NOTIFY_DONE;
4602 }
4603
4604 static struct notifier_block stmmac_notifier = {
4605         .notifier_call = stmmac_device_event,
4606 };
4607
4608 static void stmmac_init_fs(struct net_device *dev)
4609 {
4610         struct stmmac_priv *priv = netdev_priv(dev);
4611
4612         rtnl_lock();
4613
4614         /* Create per netdev entries */
4615         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4616
4617         /* Entry to report DMA RX/TX rings */
4618         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4619                             &stmmac_rings_status_fops);
4620
4621         /* Entry to report the DMA HW features */
4622         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4623                             &stmmac_dma_cap_fops);
4624
4625         rtnl_unlock();
4626 }
4627
4628 static void stmmac_exit_fs(struct net_device *dev)
4629 {
4630         struct stmmac_priv *priv = netdev_priv(dev);
4631
4632         debugfs_remove_recursive(priv->dbgfs_dir);
4633 }
4634 #endif /* CONFIG_DEBUG_FS */
4635
4636 static u32 stmmac_vid_crc32_le(__le16 vid_le)
4637 {
4638         unsigned char *data = (unsigned char *)&vid_le;
4639         unsigned char data_byte = 0;
4640         u32 crc = ~0x0;
4641         u32 temp = 0;
4642         int i, bits;
4643
4644         bits = get_bitmask_order(VLAN_VID_MASK);
4645         for (i = 0; i < bits; i++) {
4646                 if ((i % 8) == 0)
4647                         data_byte = data[i / 8];
4648
4649                 temp = ((crc & 1) ^ data_byte) & 1;
4650                 crc >>= 1;
4651                 data_byte >>= 1;
4652
4653                 if (temp)
4654                         crc ^= 0xedb88320;
4655         }
4656
4657         return crc;
4658 }
4659
4660 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4661 {
4662         u32 crc, hash = 0;
4663         __le16 pmatch = 0;
4664         int count = 0;
4665         u16 vid = 0;
4666
4667         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4668                 __le16 vid_le = cpu_to_le16(vid);
4669                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4670                 hash |= (1 << crc);
4671                 count++;
4672         }
4673
4674         if (!priv->dma_cap.vlhash) {
4675                 if (count > 2) /* VID = 0 always passes filter */
4676                         return -EOPNOTSUPP;
4677
4678                 pmatch = cpu_to_le16(vid);
4679                 hash = 0;
4680         }
4681
4682         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4683 }
4684
4685 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4686 {
4687         struct stmmac_priv *priv = netdev_priv(ndev);
4688         bool is_double = false;
4689         int ret;
4690
4691         if (be16_to_cpu(proto) == ETH_P_8021AD)
4692                 is_double = true;
4693
4694         set_bit(vid, priv->active_vlans);
4695         ret = stmmac_vlan_update(priv, is_double);
4696         if (ret) {
4697                 clear_bit(vid, priv->active_vlans);
4698                 return ret;
4699         }
4700
4701         if (priv->hw->num_vlan) {
4702                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4703                 if (ret)
4704                         return ret;
4705         }
4706
4707         return 0;
4708 }
4709
4710 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4711 {
4712         struct stmmac_priv *priv = netdev_priv(ndev);
4713         bool is_double = false;
4714         int ret;
4715
4716         ret = pm_runtime_get_sync(priv->device);
4717         if (ret < 0) {
4718                 pm_runtime_put_noidle(priv->device);
4719                 return ret;
4720         }
4721
4722         if (be16_to_cpu(proto) == ETH_P_8021AD)
4723                 is_double = true;
4724
4725         clear_bit(vid, priv->active_vlans);
4726
4727         if (priv->hw->num_vlan) {
4728                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4729                 if (ret)
4730                         goto del_vlan_error;
4731         }
4732
4733         ret = stmmac_vlan_update(priv, is_double);
4734
4735 del_vlan_error:
4736         pm_runtime_put(priv->device);
4737
4738         return ret;
4739 }
4740
4741 static const struct net_device_ops stmmac_netdev_ops = {
4742         .ndo_open = stmmac_open,
4743         .ndo_start_xmit = stmmac_xmit,
4744         .ndo_stop = stmmac_release,
4745         .ndo_change_mtu = stmmac_change_mtu,
4746         .ndo_fix_features = stmmac_fix_features,
4747         .ndo_set_features = stmmac_set_features,
4748         .ndo_set_rx_mode = stmmac_set_rx_mode,
4749         .ndo_tx_timeout = stmmac_tx_timeout,
4750         .ndo_do_ioctl = stmmac_ioctl,
4751         .ndo_setup_tc = stmmac_setup_tc,
4752         .ndo_select_queue = stmmac_select_queue,
4753 #ifdef CONFIG_NET_POLL_CONTROLLER
4754         .ndo_poll_controller = stmmac_poll_controller,
4755 #endif
4756         .ndo_set_mac_address = stmmac_set_mac_address,
4757         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4758         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4759 };
4760
4761 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4762 {
4763         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4764                 return;
4765         if (test_bit(STMMAC_DOWN, &priv->state))
4766                 return;
4767
4768         netdev_err(priv->dev, "Reset adapter.\n");
4769
4770         rtnl_lock();
4771         netif_trans_update(priv->dev);
4772         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4773                 usleep_range(1000, 2000);
4774
4775         set_bit(STMMAC_DOWN, &priv->state);
4776         dev_close(priv->dev);
4777         dev_open(priv->dev, NULL);
4778         clear_bit(STMMAC_DOWN, &priv->state);
4779         clear_bit(STMMAC_RESETING, &priv->state);
4780         rtnl_unlock();
4781 }
4782
4783 static void stmmac_service_task(struct work_struct *work)
4784 {
4785         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4786                         service_task);
4787
4788         stmmac_reset_subtask(priv);
4789         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4790 }
4791
4792 /**
4793  *  stmmac_hw_init - Init the MAC device
4794  *  @priv: driver private structure
4795  *  Description: this function is to configure the MAC device according to
4796  *  some platform parameters or the HW capability register. It prepares the
4797  *  driver to use either ring or chain modes and to setup either enhanced or
4798  *  normal descriptors.
4799  */
4800 static int stmmac_hw_init(struct stmmac_priv *priv)
4801 {
4802         int ret;
4803
4804         /* dwmac-sun8i only work in chain mode */
4805         if (priv->plat->has_sun8i)
4806                 chain_mode = 1;
4807         priv->chain_mode = chain_mode;
4808
4809         /* Initialize HW Interface */
4810         ret = stmmac_hwif_init(priv);
4811         if (ret)
4812                 return ret;
4813
4814         /* Get the HW capability (new GMAC newer than 3.50a) */
4815         priv->hw_cap_support = stmmac_get_hw_features(priv);
4816         if (priv->hw_cap_support) {
4817                 dev_info(priv->device, "DMA HW capability register supported\n");
4818
4819                 /* We can override some gmac/dma configuration fields: e.g.
4820                  * enh_desc, tx_coe (e.g. that are passed through the
4821                  * platform) with the values from the HW capability
4822                  * register (if supported).
4823                  */
4824                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4825                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4826                 priv->hw->pmt = priv->plat->pmt;
4827                 if (priv->dma_cap.hash_tb_sz) {
4828                         priv->hw->multicast_filter_bins =
4829                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
4830                         priv->hw->mcast_bits_log2 =
4831                                         ilog2(priv->hw->multicast_filter_bins);
4832                 }
4833
4834                 /* TXCOE doesn't work in thresh DMA mode */
4835                 if (priv->plat->force_thresh_dma_mode)
4836                         priv->plat->tx_coe = 0;
4837                 else
4838                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4839
4840                 /* In case of GMAC4 rx_coe is from HW cap register. */
4841                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4842
4843                 if (priv->dma_cap.rx_coe_type2)
4844                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4845                 else if (priv->dma_cap.rx_coe_type1)
4846                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4847
4848         } else {
4849                 dev_info(priv->device, "No HW DMA feature register supported\n");
4850         }
4851
4852         if (priv->plat->rx_coe) {
4853                 priv->hw->rx_csum = priv->plat->rx_coe;
4854                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4855                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4856                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4857         }
4858         if (priv->plat->tx_coe)
4859                 dev_info(priv->device, "TX Checksum insertion supported\n");
4860
4861         if (priv->plat->pmt) {
4862                 dev_info(priv->device, "Wake-Up On Lan supported\n");
4863                 device_set_wakeup_capable(priv->device, 1);
4864         }
4865
4866         if (priv->dma_cap.tsoen)
4867                 dev_info(priv->device, "TSO supported\n");
4868
4869         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4870         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4871
4872         /* Run HW quirks, if any */
4873         if (priv->hwif_quirks) {
4874                 ret = priv->hwif_quirks(priv);
4875                 if (ret)
4876                         return ret;
4877         }
4878
4879         /* Rx Watchdog is available in the COREs newer than the 3.40.
4880          * In some case, for example on bugged HW this feature
4881          * has to be disable and this can be done by passing the
4882          * riwt_off field from the platform.
4883          */
4884         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4885             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4886                 priv->use_riwt = 1;
4887                 dev_info(priv->device,
4888                          "Enable RX Mitigation via HW Watchdog Timer\n");
4889         }
4890
4891         return 0;
4892 }
4893
4894 static void stmmac_napi_add(struct net_device *dev)
4895 {
4896         struct stmmac_priv *priv = netdev_priv(dev);
4897         u32 queue, maxq;
4898
4899         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4900
4901         for (queue = 0; queue < maxq; queue++) {
4902                 struct stmmac_channel *ch = &priv->channel[queue];
4903
4904                 ch->priv_data = priv;
4905                 ch->index = queue;
4906                 spin_lock_init(&ch->lock);
4907
4908                 if (queue < priv->plat->rx_queues_to_use) {
4909                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
4910                                        NAPI_POLL_WEIGHT);
4911                 }
4912                 if (queue < priv->plat->tx_queues_to_use) {
4913                         netif_tx_napi_add(dev, &ch->tx_napi,
4914                                           stmmac_napi_poll_tx,
4915                                           NAPI_POLL_WEIGHT);
4916                 }
4917         }
4918 }
4919
4920 static void stmmac_napi_del(struct net_device *dev)
4921 {
4922         struct stmmac_priv *priv = netdev_priv(dev);
4923         u32 queue, maxq;
4924
4925         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4926
4927         for (queue = 0; queue < maxq; queue++) {
4928                 struct stmmac_channel *ch = &priv->channel[queue];
4929
4930                 if (queue < priv->plat->rx_queues_to_use)
4931                         netif_napi_del(&ch->rx_napi);
4932                 if (queue < priv->plat->tx_queues_to_use)
4933                         netif_napi_del(&ch->tx_napi);
4934         }
4935 }
4936
4937 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
4938 {
4939         struct stmmac_priv *priv = netdev_priv(dev);
4940         int ret = 0, i;
4941
4942         if (netif_running(dev))
4943                 stmmac_release(dev);
4944
4945         stmmac_napi_del(dev);
4946
4947         priv->plat->rx_queues_to_use = rx_cnt;
4948         priv->plat->tx_queues_to_use = tx_cnt;
4949         if (!netif_is_rxfh_configured(dev))
4950                 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
4951                         priv->rss.table[i] = ethtool_rxfh_indir_default(i,
4952                                                                         rx_cnt);
4953
4954         stmmac_napi_add(dev);
4955
4956         if (netif_running(dev))
4957                 ret = stmmac_open(dev);
4958
4959         return ret;
4960 }
4961
4962 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
4963 {
4964         struct stmmac_priv *priv = netdev_priv(dev);
4965         int ret = 0;
4966
4967         if (netif_running(dev))
4968                 stmmac_release(dev);
4969
4970         priv->dma_rx_size = rx_size;
4971         priv->dma_tx_size = tx_size;
4972
4973         if (netif_running(dev))
4974                 ret = stmmac_open(dev);
4975
4976         return ret;
4977 }
4978
4979 /**
4980  * stmmac_dvr_probe
4981  * @device: device pointer
4982  * @plat_dat: platform data pointer
4983  * @res: stmmac resource pointer
4984  * Description: this is the main probe function used to
4985  * call the alloc_etherdev, allocate the priv structure.
4986  * Return:
4987  * returns 0 on success, otherwise errno.
4988  */
4989 int stmmac_dvr_probe(struct device *device,
4990                      struct plat_stmmacenet_data *plat_dat,
4991                      struct stmmac_resources *res)
4992 {
4993         struct net_device *ndev = NULL;
4994         struct stmmac_priv *priv;
4995         u32 rxq;
4996         int i, ret = 0;
4997
4998         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
4999                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
5000         if (!ndev)
5001                 return -ENOMEM;
5002
5003         SET_NETDEV_DEV(ndev, device);
5004
5005         priv = netdev_priv(ndev);
5006         priv->device = device;
5007         priv->dev = ndev;
5008
5009         stmmac_set_ethtool_ops(ndev);
5010         priv->pause = pause;
5011         priv->plat = plat_dat;
5012         priv->ioaddr = res->addr;
5013         priv->dev->base_addr = (unsigned long)res->addr;
5014
5015         priv->dev->irq = res->irq;
5016         priv->wol_irq = res->wol_irq;
5017         priv->lpi_irq = res->lpi_irq;
5018
5019         if (!IS_ERR_OR_NULL(res->mac))
5020                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
5021
5022         dev_set_drvdata(device, priv->dev);
5023
5024         /* Verify driver arguments */
5025         stmmac_verify_args();
5026
5027         /* Allocate workqueue */
5028         priv->wq = create_singlethread_workqueue("stmmac_wq");
5029         if (!priv->wq) {
5030                 dev_err(priv->device, "failed to create workqueue\n");
5031                 return -ENOMEM;
5032         }
5033
5034         INIT_WORK(&priv->service_task, stmmac_service_task);
5035
5036         /* Override with kernel parameters if supplied XXX CRS XXX
5037          * this needs to have multiple instances
5038          */
5039         if ((phyaddr >= 0) && (phyaddr <= 31))
5040                 priv->plat->phy_addr = phyaddr;
5041
5042         if (priv->plat->stmmac_rst) {
5043                 ret = reset_control_assert(priv->plat->stmmac_rst);
5044                 reset_control_deassert(priv->plat->stmmac_rst);
5045                 /* Some reset controllers have only reset callback instead of
5046                  * assert + deassert callbacks pair.
5047                  */
5048                 if (ret == -ENOTSUPP)
5049                         reset_control_reset(priv->plat->stmmac_rst);
5050         }
5051
5052         /* Init MAC and get the capabilities */
5053         ret = stmmac_hw_init(priv);
5054         if (ret)
5055                 goto error_hw_init;
5056
5057         stmmac_check_ether_addr(priv);
5058
5059         ndev->netdev_ops = &stmmac_netdev_ops;
5060
5061         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5062                             NETIF_F_RXCSUM;
5063
5064         ret = stmmac_tc_init(priv, priv);
5065         if (!ret) {
5066                 ndev->hw_features |= NETIF_F_HW_TC;
5067         }
5068
5069         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5070                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
5071                 if (priv->plat->has_gmac4)
5072                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
5073                 priv->tso = true;
5074                 dev_info(priv->device, "TSO feature enabled\n");
5075         }
5076
5077         if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
5078                 ndev->hw_features |= NETIF_F_GRO;
5079                 priv->sph = true;
5080                 dev_info(priv->device, "SPH feature enabled\n");
5081         }
5082
5083         /* The current IP register MAC_HW_Feature1[ADDR64] only define
5084          * 32/40/64 bit width, but some SOC support others like i.MX8MP
5085          * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
5086          * So overwrite dma_cap.addr64 according to HW real design.
5087          */
5088         if (priv->plat->addr64)
5089                 priv->dma_cap.addr64 = priv->plat->addr64;
5090
5091         if (priv->dma_cap.addr64) {
5092                 ret = dma_set_mask_and_coherent(device,
5093                                 DMA_BIT_MASK(priv->dma_cap.addr64));
5094                 if (!ret) {
5095                         dev_info(priv->device, "Using %d bits DMA width\n",
5096                                  priv->dma_cap.addr64);
5097
5098                         /*
5099                          * If more than 32 bits can be addressed, make sure to
5100                          * enable enhanced addressing mode.
5101                          */
5102                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5103                                 priv->plat->dma_cfg->eame = true;
5104                 } else {
5105                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5106                         if (ret) {
5107                                 dev_err(priv->device, "Failed to set DMA Mask\n");
5108                                 goto error_hw_init;
5109                         }
5110
5111                         priv->dma_cap.addr64 = 32;
5112                 }
5113         }
5114
5115         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
5116         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
5117 #ifdef STMMAC_VLAN_TAG_USED
5118         /* Both mac100 and gmac support receive VLAN tag detection */
5119         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
5120         if (priv->dma_cap.vlhash) {
5121                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5122                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5123         }
5124         if (priv->dma_cap.vlins) {
5125                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
5126                 if (priv->dma_cap.dvlan)
5127                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
5128         }
5129 #endif
5130         priv->msg_enable = netif_msg_init(debug, default_msg_level);
5131
5132         /* Initialize RSS */
5133         rxq = priv->plat->rx_queues_to_use;
5134         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
5135         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
5136                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
5137
5138         if (priv->dma_cap.rssen && priv->plat->rss_en)
5139                 ndev->features |= NETIF_F_RXHASH;
5140
5141         /* MTU range: 46 - hw-specific max */
5142         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
5143         if (priv->plat->has_xgmac)
5144                 ndev->max_mtu = XGMAC_JUMBO_LEN;
5145         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
5146                 ndev->max_mtu = JUMBO_LEN;
5147         else
5148                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
5149         /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
5150          * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
5151          */
5152         if ((priv->plat->maxmtu < ndev->max_mtu) &&
5153             (priv->plat->maxmtu >= ndev->min_mtu))
5154                 ndev->max_mtu = priv->plat->maxmtu;
5155         else if (priv->plat->maxmtu < ndev->min_mtu)
5156                 dev_warn(priv->device,
5157                          "%s: warning: maxmtu having invalid value (%d)\n",
5158                          __func__, priv->plat->maxmtu);
5159
5160         if (flow_ctrl)
5161                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
5162
5163         /* Setup channels NAPI */
5164         stmmac_napi_add(ndev);
5165
5166         mutex_init(&priv->lock);
5167
5168         /* If a specific clk_csr value is passed from the platform
5169          * this means that the CSR Clock Range selection cannot be
5170          * changed at run-time and it is fixed. Viceversa the driver'll try to
5171          * set the MDC clock dynamically according to the csr actual
5172          * clock input.
5173          */
5174         if (priv->plat->clk_csr >= 0)
5175                 priv->clk_csr = priv->plat->clk_csr;
5176         else
5177                 stmmac_clk_csr_set(priv);
5178
5179         stmmac_check_pcs_mode(priv);
5180
5181         pm_runtime_get_noresume(device);
5182         pm_runtime_set_active(device);
5183         pm_runtime_enable(device);
5184
5185         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5186             priv->hw->pcs != STMMAC_PCS_RTBI) {
5187                 /* MDIO bus Registration */
5188                 ret = stmmac_mdio_register(ndev);
5189                 if (ret < 0) {
5190                         dev_err_probe(priv->device, ret,
5191                                       "%s: MDIO bus (id: %d) registration failed\n",
5192                                       __func__, priv->plat->bus_id);
5193                         goto error_mdio_register;
5194                 }
5195         }
5196
5197         ret = stmmac_phy_setup(priv);
5198         if (ret) {
5199                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
5200                 goto error_phy_setup;
5201         }
5202
5203         ret = register_netdev(ndev);
5204         if (ret) {
5205                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
5206                         __func__, ret);
5207                 goto error_netdev_register;
5208         }
5209
5210 #ifdef CONFIG_DEBUG_FS
5211         stmmac_init_fs(ndev);
5212 #endif
5213
5214         /* Let pm_runtime_put() disable the clocks.
5215          * If CONFIG_PM is not enabled, the clocks will stay powered.
5216          */
5217         pm_runtime_put(device);
5218
5219         return ret;
5220
5221 error_netdev_register:
5222         phylink_destroy(priv->phylink);
5223 error_phy_setup:
5224         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5225             priv->hw->pcs != STMMAC_PCS_RTBI)
5226                 stmmac_mdio_unregister(ndev);
5227 error_mdio_register:
5228         stmmac_napi_del(ndev);
5229 error_hw_init:
5230         destroy_workqueue(priv->wq);
5231
5232         return ret;
5233 }
5234 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
5235
5236 /**
5237  * stmmac_dvr_remove
5238  * @dev: device pointer
5239  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
5240  * changes the link status, releases the DMA descriptor rings.
5241  */
5242 int stmmac_dvr_remove(struct device *dev)
5243 {
5244         struct net_device *ndev = dev_get_drvdata(dev);
5245         struct stmmac_priv *priv = netdev_priv(ndev);
5246
5247         netdev_info(priv->dev, "%s: removing driver", __func__);
5248
5249         stmmac_stop_all_dma(priv);
5250         stmmac_mac_set(priv, priv->ioaddr, false);
5251         netif_carrier_off(ndev);
5252         unregister_netdev(ndev);
5253
5254 #ifdef CONFIG_DEBUG_FS
5255         stmmac_exit_fs(ndev);
5256 #endif
5257         phylink_destroy(priv->phylink);
5258         if (priv->plat->stmmac_rst)
5259                 reset_control_assert(priv->plat->stmmac_rst);
5260         pm_runtime_put(dev);
5261         pm_runtime_disable(dev);
5262         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5263             priv->hw->pcs != STMMAC_PCS_RTBI)
5264                 stmmac_mdio_unregister(ndev);
5265         destroy_workqueue(priv->wq);
5266         mutex_destroy(&priv->lock);
5267
5268         return 0;
5269 }
5270 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
5271
5272 /**
5273  * stmmac_suspend - suspend callback
5274  * @dev: device pointer
5275  * Description: this is the function to suspend the device and it is called
5276  * by the platform driver to stop the network queue, release the resources,
5277  * program the PMT register (for WoL), clean and release driver resources.
5278  */
5279 int stmmac_suspend(struct device *dev)
5280 {
5281         struct net_device *ndev = dev_get_drvdata(dev);
5282         struct stmmac_priv *priv = netdev_priv(ndev);
5283         u32 chan;
5284
5285         if (!ndev || !netif_running(ndev))
5286                 return 0;
5287
5288         phylink_mac_change(priv->phylink, false);
5289
5290         mutex_lock(&priv->lock);
5291
5292         netif_device_detach(ndev);
5293
5294         stmmac_disable_all_queues(priv);
5295
5296         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5297                 del_timer_sync(&priv->tx_queue[chan].txtimer);
5298
5299         if (priv->eee_enabled) {
5300                 priv->tx_path_in_lpi_mode = false;
5301                 del_timer_sync(&priv->eee_ctrl_timer);
5302         }
5303
5304         /* Stop TX/RX DMA */
5305         stmmac_stop_all_dma(priv);
5306
5307         if (priv->plat->serdes_powerdown)
5308                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5309
5310         /* Enable Power down mode by programming the PMT regs */
5311         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5312                 stmmac_pmt(priv, priv->hw, priv->wolopts);
5313                 priv->irq_wake = 1;
5314         } else {
5315                 mutex_unlock(&priv->lock);
5316                 rtnl_lock();
5317                 if (device_may_wakeup(priv->device))
5318                         phylink_speed_down(priv->phylink, false);
5319                 phylink_stop(priv->phylink);
5320                 rtnl_unlock();
5321                 mutex_lock(&priv->lock);
5322
5323                 stmmac_mac_set(priv, priv->ioaddr, false);
5324                 pinctrl_pm_select_sleep_state(priv->device);
5325         }
5326         mutex_unlock(&priv->lock);
5327
5328         priv->speed = SPEED_UNKNOWN;
5329         return 0;
5330 }
5331 EXPORT_SYMBOL_GPL(stmmac_suspend);
5332
5333 /**
5334  * stmmac_reset_queues_param - reset queue parameters
5335  * @priv: device pointer
5336  */
5337 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5338 {
5339         u32 rx_cnt = priv->plat->rx_queues_to_use;
5340         u32 tx_cnt = priv->plat->tx_queues_to_use;
5341         u32 queue;
5342
5343         for (queue = 0; queue < rx_cnt; queue++) {
5344                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5345
5346                 rx_q->cur_rx = 0;
5347                 rx_q->dirty_rx = 0;
5348         }
5349
5350         for (queue = 0; queue < tx_cnt; queue++) {
5351                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5352
5353                 tx_q->cur_tx = 0;
5354                 tx_q->dirty_tx = 0;
5355                 tx_q->mss = 0;
5356
5357                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
5358         }
5359 }
5360
5361 /**
5362  * stmmac_resume - resume callback
5363  * @dev: device pointer
5364  * Description: when resume this function is invoked to setup the DMA and CORE
5365  * in a usable state.
5366  */
5367 int stmmac_resume(struct device *dev)
5368 {
5369         struct net_device *ndev = dev_get_drvdata(dev);
5370         struct stmmac_priv *priv = netdev_priv(ndev);
5371         int ret;
5372
5373         if (!netif_running(ndev))
5374                 return 0;
5375
5376         /* Power Down bit, into the PM register, is cleared
5377          * automatically as soon as a magic packet or a Wake-up frame
5378          * is received. Anyway, it's better to manually clear
5379          * this bit because it can generate problems while resuming
5380          * from another devices (e.g. serial console).
5381          */
5382         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5383                 mutex_lock(&priv->lock);
5384                 stmmac_pmt(priv, priv->hw, 0);
5385                 mutex_unlock(&priv->lock);
5386                 priv->irq_wake = 0;
5387         } else {
5388                 pinctrl_pm_select_default_state(priv->device);
5389                 /* reset the phy so that it's ready */
5390                 if (priv->mii)
5391                         stmmac_mdio_reset(priv->mii);
5392         }
5393
5394         if (priv->plat->serdes_powerup) {
5395                 ret = priv->plat->serdes_powerup(ndev,
5396                                                  priv->plat->bsp_priv);
5397
5398                 if (ret < 0)
5399                         return ret;
5400         }
5401
5402         if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
5403                 rtnl_lock();
5404                 phylink_start(priv->phylink);
5405                 /* We may have called phylink_speed_down before */
5406                 phylink_speed_up(priv->phylink);
5407                 rtnl_unlock();
5408         }
5409
5410         rtnl_lock();
5411         mutex_lock(&priv->lock);
5412
5413         stmmac_reset_queues_param(priv);
5414
5415         stmmac_free_tx_skbufs(priv);
5416         stmmac_clear_descriptors(priv);
5417
5418         stmmac_hw_setup(ndev, false);
5419         stmmac_init_coalesce(priv);
5420         stmmac_set_rx_mode(ndev);
5421
5422         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5423
5424         stmmac_enable_all_queues(priv);
5425
5426         mutex_unlock(&priv->lock);
5427         rtnl_unlock();
5428
5429         phylink_mac_change(priv->phylink, true);
5430
5431         netif_device_attach(ndev);
5432
5433         return 0;
5434 }
5435 EXPORT_SYMBOL_GPL(stmmac_resume);
5436
5437 #ifndef MODULE
5438 static int __init stmmac_cmdline_opt(char *str)
5439 {
5440         char *opt;
5441
5442         if (!str || !*str)
5443                 return 1;
5444         while ((opt = strsep(&str, ",")) != NULL) {
5445                 if (!strncmp(opt, "debug:", 6)) {
5446                         if (kstrtoint(opt + 6, 0, &debug))
5447                                 goto err;
5448                 } else if (!strncmp(opt, "phyaddr:", 8)) {
5449                         if (kstrtoint(opt + 8, 0, &phyaddr))
5450                                 goto err;
5451                 } else if (!strncmp(opt, "buf_sz:", 7)) {
5452                         if (kstrtoint(opt + 7, 0, &buf_sz))
5453                                 goto err;
5454                 } else if (!strncmp(opt, "tc:", 3)) {
5455                         if (kstrtoint(opt + 3, 0, &tc))
5456                                 goto err;
5457                 } else if (!strncmp(opt, "watchdog:", 9)) {
5458                         if (kstrtoint(opt + 9, 0, &watchdog))
5459                                 goto err;
5460                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
5461                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
5462                                 goto err;
5463                 } else if (!strncmp(opt, "pause:", 6)) {
5464                         if (kstrtoint(opt + 6, 0, &pause))
5465                                 goto err;
5466                 } else if (!strncmp(opt, "eee_timer:", 10)) {
5467                         if (kstrtoint(opt + 10, 0, &eee_timer))
5468                                 goto err;
5469                 } else if (!strncmp(opt, "chain_mode:", 11)) {
5470                         if (kstrtoint(opt + 11, 0, &chain_mode))
5471                                 goto err;
5472                 }
5473         }
5474         return 1;
5475
5476 err:
5477         pr_err("%s: ERROR broken module parameter conversion", __func__);
5478         return 1;
5479 }
5480
5481 __setup("stmmaceth=", stmmac_cmdline_opt);
5482 #endif /* MODULE */
5483
5484 static int __init stmmac_init(void)
5485 {
5486 #ifdef CONFIG_DEBUG_FS
5487         /* Create debugfs main directory if it doesn't exist yet */
5488         if (!stmmac_fs_dir)
5489                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5490         register_netdevice_notifier(&stmmac_notifier);
5491 #endif
5492
5493         return 0;
5494 }
5495
5496 static void __exit stmmac_exit(void)
5497 {
5498 #ifdef CONFIG_DEBUG_FS
5499         unregister_netdevice_notifier(&stmmac_notifier);
5500         debugfs_remove_recursive(stmmac_fs_dir);
5501 #endif
5502 }
5503
5504 module_init(stmmac_init)
5505 module_exit(stmmac_exit)
5506
5507 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
5508 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
5509 MODULE_LICENSE("GPL");