1 /* Renesas Ethernet AVB device driver
3 * Copyright (C) 2014-2019 Renesas Electronics Corporation
4 * Copyright (C) 2015 Renesas Solutions Corp.
5 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
7 * Based on the SuperH Ethernet driver
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License version 2,
11 * as published by the Free Software Foundation.
14 #include <linux/cache.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/err.h>
19 #include <linux/etherdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/if_vlan.h>
22 #include <linux/kernel.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/net_tstamp.h>
27 #include <linux/of_device.h>
28 #include <linux/of_irq.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/slab.h>
33 #include <linux/spinlock.h>
34 #include <linux/sys_soc.h>
36 #include <asm/div64.h>
40 #define RAVB_DEF_MSG_ENABLE \
46 static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
51 static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
56 void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
59 ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
62 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
66 for (i = 0; i < 10000; i++) {
67 if ((ravb_read(ndev, reg) & mask) == value)
74 static int ravb_config(struct net_device *ndev)
79 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
80 /* Check if the operating mode is changed to the config mode */
81 error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
83 netdev_err(ndev, "failed to switch device to config mode\n");
88 static void ravb_set_duplex(struct net_device *ndev)
90 struct ravb_private *priv = netdev_priv(ndev);
92 ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
95 static void ravb_set_rate(struct net_device *ndev)
97 struct ravb_private *priv = netdev_priv(ndev);
99 switch (priv->speed) {
100 case 100: /* 100BASE */
101 ravb_write(ndev, GECMR_SPEED_100, GECMR);
103 case 1000: /* 1000BASE */
104 ravb_write(ndev, GECMR_SPEED_1000, GECMR);
109 static void ravb_set_buffer_align(struct sk_buff *skb)
111 u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
114 skb_reserve(skb, RAVB_ALIGN - reserve);
117 /* Get MAC address from the MAC address registers
119 * The Ethernet AVB device doesn't have a ROM for the MAC address.
120 * This function gets the MAC address that was set up by the bootloader.
122 static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
125 ether_addr_copy(ndev->dev_addr, mac);
127 u32 mahr = ravb_read(ndev, MAHR);
128 u32 malr = ravb_read(ndev, MALR);
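/* MAHR holds the upper four bytes of the MAC address and MALR the
 * lower two; the values are whatever the bootloader left there.
 */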
130 ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
131 ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
132 ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
133 ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
134 ndev->dev_addr[4] = (malr >> 8) & 0xFF;
135 ndev->dev_addr[5] = (malr >> 0) & 0xFF;
139 static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
141 struct ravb_private *priv = container_of(ctrl, struct ravb_private,
144 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
147 /* MDC pin control */
148 static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
150 ravb_mdio_ctrl(ctrl, PIR_MDC, level);
153 /* Data I/O pin control */
154 static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
156 ravb_mdio_ctrl(ctrl, PIR_MMD, output);
160 static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
162 ravb_mdio_ctrl(ctrl, PIR_MDO, value);
166 static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
168 struct ravb_private *priv = container_of(ctrl, struct ravb_private,
171 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
174 /* MDIO bus control struct */
175 static struct mdiobb_ops bb_ops = {
176 .owner = THIS_MODULE,
177 .set_mdc = ravb_set_mdc,
178 .set_mdio_dir = ravb_set_mdio_dir,
179 .set_mdio_data = ravb_set_mdio_data,
180 .get_mdio_data = ravb_get_mdio_data,
183 /* Free TX skb function for AVB-IP */
184 static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
186 struct ravb_private *priv = netdev_priv(ndev);
187 struct net_device_stats *stats = &priv->stats[q];
188 struct ravb_tx_desc *desc;
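/* Reclaim descriptors from dirty_tx up to cur_tx. Each frame spans
 * NUM_TX_DESC descriptors, so the skb ring is indexed by
 * entry / NUM_TX_DESC and the skb is only freed on the last
 * descriptor of a frame.
 */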
193 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
196 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
198 desc = &priv->tx_ring[q][entry];
199 txed = desc->die_dt == DT_FEMPTY;
200 if (free_txed_only && !txed)
202 /* Descriptor type must be checked before all other reads */
204 size = le16_to_cpu(desc->ds_tagl) & TX_DS;
205 /* Free the original skb. */
206 if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
207 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
208 size, DMA_TO_DEVICE);
209 /* Last packet descriptor? */
210 if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
211 entry /= NUM_TX_DESC;
212 dev_kfree_skb_any(priv->tx_skb[q][entry]);
213 priv->tx_skb[q][entry] = NULL;
220 stats->tx_bytes += size;
221 desc->die_dt = DT_EEMPTY;
226 /* Free skb's and DMA buffers for Ethernet AVB */
227 static void ravb_ring_free(struct net_device *ndev, int q)
229 struct ravb_private *priv = netdev_priv(ndev);
233 if (priv->rx_ring[q]) {
234 for (i = 0; i < priv->num_rx_ring[q]; i++) {
235 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
237 if (!dma_mapping_error(ndev->dev.parent,
238 le32_to_cpu(desc->dptr)))
239 dma_unmap_single(ndev->dev.parent,
240 le32_to_cpu(desc->dptr),
244 ring_size = sizeof(struct ravb_ex_rx_desc) *
245 (priv->num_rx_ring[q] + 1);
246 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
247 priv->rx_desc_dma[q]);
248 priv->rx_ring[q] = NULL;
251 if (priv->tx_ring[q]) {
252 ravb_tx_free(ndev, q, false);
254 ring_size = sizeof(struct ravb_tx_desc) *
255 (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
256 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
257 priv->tx_desc_dma[q]);
258 priv->tx_ring[q] = NULL;
261 /* Free RX skb ringbuffer */
262 if (priv->rx_skb[q]) {
263 for (i = 0; i < priv->num_rx_ring[q]; i++)
264 dev_kfree_skb(priv->rx_skb[q][i]);
266 kfree(priv->rx_skb[q]);
267 priv->rx_skb[q] = NULL;
269 /* Free aligned TX buffers */
270 kfree(priv->tx_align[q]);
271 priv->tx_align[q] = NULL;
273 /* Free TX skb ringbuffer.
274 * SKBs are freed by ravb_tx_free() call above.
276 kfree(priv->tx_skb[q]);
277 priv->tx_skb[q] = NULL;
280 /* Format skb and descriptor buffer for Ethernet AVB */
281 static void ravb_ring_format(struct net_device *ndev, int q)
283 struct ravb_private *priv = netdev_priv(ndev);
284 struct ravb_ex_rx_desc *rx_desc;
285 struct ravb_tx_desc *tx_desc;
286 struct ravb_desc *desc;
287 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
288 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
295 priv->dirty_rx[q] = 0;
296 priv->dirty_tx[q] = 0;
298 memset(priv->rx_ring[q], 0, rx_ring_size);
299 /* Build RX ring buffer */
300 for (i = 0; i < priv->num_rx_ring[q]; i++) {
302 rx_desc = &priv->rx_ring[q][i];
303 rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
304 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
307 /* We just set the data size to 0 for a failed mapping which
308 * should prevent DMA from happening...
310 if (dma_mapping_error(ndev->dev.parent, dma_addr))
311 rx_desc->ds_cc = cpu_to_le16(0);
312 rx_desc->dptr = cpu_to_le32(dma_addr);
313 rx_desc->die_dt = DT_FEMPTY;
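/* Terminate the ring with a link descriptor that points back to the
 * ring base, making the descriptor chain circular.
 */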
315 rx_desc = &priv->rx_ring[q][i];
316 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
317 rx_desc->die_dt = DT_LINKFIX; /* type */
319 memset(priv->tx_ring[q], 0, tx_ring_size);
320 /* Build TX ring buffer */
321 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
323 tx_desc->die_dt = DT_EEMPTY;
325 tx_desc->die_dt = DT_EEMPTY;
327 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
328 tx_desc->die_dt = DT_LINKFIX; /* type */
330 /* RX descriptor base address for best effort */
331 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
332 desc->die_dt = DT_LINKFIX; /* type */
333 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
335 /* TX descriptor base address for best effort */
336 desc = &priv->desc_bat[q];
337 desc->die_dt = DT_LINKFIX; /* type */
338 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
341 /* Init skb and descriptor buffer for Ethernet AVB */
342 static int ravb_ring_init(struct net_device *ndev, int q)
344 struct ravb_private *priv = netdev_priv(ndev);
349 /* Allocate RX and TX skb rings */
350 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
351 sizeof(*priv->rx_skb[q]), GFP_KERNEL);
352 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
353 sizeof(*priv->tx_skb[q]), GFP_KERNEL);
354 if (!priv->rx_skb[q] || !priv->tx_skb[q])
357 for (i = 0; i < priv->num_rx_ring[q]; i++) {
358 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
361 ravb_set_buffer_align(skb);
362 priv->rx_skb[q][i] = skb;
365 /* Allocate rings for the aligned buffers */
366 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
367 DPTR_ALIGN - 1, GFP_KERNEL);
368 if (!priv->tx_align[q])
371 /* Allocate all RX descriptors. */
372 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
373 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
374 &priv->rx_desc_dma[q],
376 if (!priv->rx_ring[q])
379 priv->dirty_rx[q] = 0;
381 /* Allocate all TX descriptors. */
382 ring_size = sizeof(struct ravb_tx_desc) *
383 (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
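/* Each ring is allocated with one extra descriptor to hold the
 * DT_LINKFIX link descriptor set up in ravb_ring_format().
 */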
384 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
385 &priv->tx_desc_dma[q],
387 if (!priv->tx_ring[q])
393 ravb_ring_free(ndev, q);
398 /* E-MAC init function */
399 static void ravb_emac_init(struct net_device *ndev)
401 struct ravb_private *priv = netdev_priv(ndev);
403 /* Receive frame limit set register */
404 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
406 /* PAUSE prohibition */
407 ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
408 ECMR_TE | ECMR_RE, ECMR);
412 /* Set MAC address */
414 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
415 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
417 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
419 /* E-MAC status register clear */
420 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
422 /* E-MAC interrupt enable register */
423 ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
426 /* Device init function for Ethernet AVB */
427 static int ravb_dmac_init(struct net_device *ndev)
429 struct ravb_private *priv = netdev_priv(ndev);
432 /* Set CONFIG mode */
433 error = ravb_config(ndev);
437 error = ravb_ring_init(ndev, RAVB_BE);
440 error = ravb_ring_init(ndev, RAVB_NC);
442 ravb_ring_free(ndev, RAVB_BE);
446 /* Descriptor format */
447 ravb_ring_format(ndev, RAVB_BE);
448 ravb_ring_format(ndev, RAVB_NC);
450 #if defined(__LITTLE_ENDIAN)
451 ravb_modify(ndev, CCC, CCC_BOC, 0);
453 ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
458 RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
461 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
463 /* Timestamp enable */
464 ravb_write(ndev, TCCR_TFEN, TCCR);
466 /* Interrupt init: */
467 if (priv->chip_id == RCAR_GEN3) {
469 ravb_write(ndev, 0, DIL);
470 /* Set queue specific interrupt */
471 ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
474 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
475 /* Disable FIFO full warning */
476 ravb_write(ndev, 0, RIC1);
477 /* Receive FIFO full error, descriptor empty */
478 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
479 /* Frame transmitted, timestamp FIFO updated */
480 ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
482 /* Setting the control will start the AVB-DMAC process. */
483 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
488 static void ravb_get_tx_tstamp(struct net_device *ndev)
490 struct ravb_private *priv = netdev_priv(ndev);
491 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
492 struct skb_shared_hwtstamps shhwtstamps;
494 struct timespec64 ts;
499 count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
501 tfa2 = ravb_read(ndev, TFA2);
502 tfa_tag = (tfa2 & TFA2_TST) >> 16;
503 ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
504 ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
505 ravb_read(ndev, TFA1);
506 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
507 shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
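/* Search the pending TX skb list for the tag reported by the
 * timestamp FIFO; deliver the timestamp to the matching skb and drop
 * any stale entries found before it.
 */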
508 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
512 list_del(&ts_skb->list);
514 if (tag == tfa_tag) {
515 skb_tstamp_tx(skb, &shhwtstamps);
516 dev_consume_skb_any(skb);
519 dev_kfree_skb_any(skb);
522 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
526 /* Packet receive function for Ethernet AVB */
527 static bool ravb_rx(struct net_device *ndev, int *quota, int q)
529 struct ravb_private *priv = netdev_priv(ndev);
530 int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
531 int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
533 struct net_device_stats *stats = &priv->stats[q];
534 struct ravb_ex_rx_desc *desc;
537 struct timespec64 ts;
542 boguscnt = min(boguscnt, *quota);
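/* Walk the ring starting at cur_rx, stopping at the first descriptor
 * still owned by hardware (DT_FEMPTY) or when the budget (boguscnt)
 * is exhausted.
 */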
544 desc = &priv->rx_ring[q][entry];
545 while (desc->die_dt != DT_FEMPTY) {
546 /* Descriptor type must be checked before all other reads */
548 desc_status = desc->msc;
549 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
554 /* We use 0-byte descriptors to mark the DMA mapping errors */
558 if (desc_status & MSC_MC)
561 if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
564 if (desc_status & MSC_CRC)
565 stats->rx_crc_errors++;
566 if (desc_status & MSC_RFE)
567 stats->rx_frame_errors++;
568 if (desc_status & (MSC_RTLF | MSC_RTSF))
569 stats->rx_length_errors++;
570 if (desc_status & MSC_CEEF)
571 stats->rx_missed_errors++;
573 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
575 skb = priv->rx_skb[q][entry];
576 priv->rx_skb[q][entry] = NULL;
577 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
580 get_ts &= (q == RAVB_NC) ?
581 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
582 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
584 struct skb_shared_hwtstamps *shhwtstamps;
586 shhwtstamps = skb_hwtstamps(skb);
587 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
588 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
589 32) | le32_to_cpu(desc->ts_sl);
590 ts.tv_nsec = le32_to_cpu(desc->ts_n);
591 shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
593 skb_put(skb, pkt_len);
594 skb->protocol = eth_type_trans(skb, ndev);
595 napi_gro_receive(&priv->napi[q], skb);
597 stats->rx_bytes += pkt_len;
600 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
601 desc = &priv->rx_ring[q][entry];
604 /* Refill the RX ring buffers. */
605 for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
606 entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
607 desc = &priv->rx_ring[q][entry];
608 desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
610 if (!priv->rx_skb[q][entry]) {
611 skb = netdev_alloc_skb(ndev,
612 PKT_BUF_SZ + RAVB_ALIGN - 1);
614 break; /* Better luck next round. */
615 ravb_set_buffer_align(skb);
616 dma_addr = dma_map_single(ndev->dev.parent, skb->data,
617 le16_to_cpu(desc->ds_cc),
619 skb_checksum_none_assert(skb);
620 /* We just set the data size to 0 for a failed mapping
621 * which should prevent DMA from happening...
623 if (dma_mapping_error(ndev->dev.parent, dma_addr))
624 desc->ds_cc = cpu_to_le16(0);
625 desc->dptr = cpu_to_le32(dma_addr);
626 priv->rx_skb[q][entry] = skb;
628 /* Descriptor type must be set after all the above writes */
630 desc->die_dt = DT_FEMPTY;
633 *quota -= limit - (++boguscnt);
635 return boguscnt <= 0;
638 static void ravb_rcv_snd_disable(struct net_device *ndev)
640 /* Disable TX and RX */
641 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
644 static void ravb_rcv_snd_enable(struct net_device *ndev)
646 /* Enable TX and RX */
647 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
650 /* Wait until the DMA process has finished */
651 static int ravb_stop_dma(struct net_device *ndev)
655 /* Wait for stopping the hardware TX process */
656 error = ravb_wait(ndev, TCCR,
657 TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
661 error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
666 /* Stop the E-MAC's RX/TX processes. */
667 ravb_rcv_snd_disable(ndev);
669 /* Wait for stopping the RX DMA process */
670 error = ravb_wait(ndev, CSR, CSR_RPO, 0);
674 /* Stop AVB-DMAC process */
675 return ravb_config(ndev);
678 /* E-MAC interrupt handler */
679 static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
681 struct ravb_private *priv = netdev_priv(ndev);
684 ecsr = ravb_read(ndev, ECSR);
685 ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
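/* ECSR_MPD flags a received Magic Packet (reported as a wake-up
 * event); ECSR_ICD (illegal carrier detect) is counted as a TX
 * carrier error below.
 */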
688 pm_wakeup_event(&priv->pdev->dev, 0);
690 ndev->stats.tx_carrier_errors++;
691 if (ecsr & ECSR_LCHNG) {
693 if (priv->no_avb_link)
695 psr = ravb_read(ndev, PSR);
696 if (priv->avb_link_active_low)
698 if (!(psr & PSR_LMON)) {
699 /* Disable RX and TX */
700 ravb_rcv_snd_disable(ndev);
702 /* Enable RX and TX */
703 ravb_rcv_snd_enable(ndev);
708 static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
710 struct net_device *ndev = dev_id;
711 struct ravb_private *priv = netdev_priv(ndev);
713 spin_lock(&priv->lock);
714 ravb_emac_interrupt_unlocked(ndev);
716 spin_unlock(&priv->lock);
720 /* Error interrupt handler */
721 static void ravb_error_interrupt(struct net_device *ndev)
723 struct ravb_private *priv = netdev_priv(ndev);
726 eis = ravb_read(ndev, EIS);
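/* Status bits in these registers are cleared by writing 0 to them
 * (and to the reserved bits), hence the inverted masks below.
 */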
727 ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
729 ris2 = ravb_read(ndev, RIS2);
730 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
733 /* Receive Descriptor Empty int (best effort queue) */
734 if (ris2 & RIS2_QFF0)
735 priv->stats[RAVB_BE].rx_over_errors++;
737 /* Receive Descriptor Empty int (network control queue) */
738 if (ris2 & RIS2_QFF1)
739 priv->stats[RAVB_NC].rx_over_errors++;
741 /* Receive FIFO Overflow int */
742 if (ris2 & RIS2_RFFF)
743 priv->rx_fifo_errors++;
747 static bool ravb_queue_interrupt(struct net_device *ndev, int q)
749 struct ravb_private *priv = netdev_priv(ndev);
750 u32 ris0 = ravb_read(ndev, RIS0);
751 u32 ric0 = ravb_read(ndev, RIC0);
752 u32 tis = ravb_read(ndev, TIS);
753 u32 tic = ravb_read(ndev, TIC);
755 if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
756 if (napi_schedule_prep(&priv->napi[q])) {
757 /* Mask RX and TX interrupts */
758 if (priv->chip_id == RCAR_GEN2) {
759 ravb_write(ndev, ric0 & ~BIT(q), RIC0);
760 ravb_write(ndev, tic & ~BIT(q), TIC);
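/* The Gen3 path below instead writes the queue bit to RID0/TID,
 * which appear to be write-only "disable" counterparts of RIC0/TIC
 * (the matching RIE0/TIE enables are written in ravb_poll()),
 * avoiding a read-modify-write.
 */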
762 ravb_write(ndev, BIT(q), RID0);
763 ravb_write(ndev, BIT(q), TID);
765 __napi_schedule(&priv->napi[q]);
768 "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
771 " tx status 0x%08x, tx mask 0x%08x.\n",
779 static bool ravb_timestamp_interrupt(struct net_device *ndev)
781 u32 tis = ravb_read(ndev, TIS);
783 if (tis & TIS_TFUF) {
784 ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
785 ravb_get_tx_tstamp(ndev);
791 static irqreturn_t ravb_interrupt(int irq, void *dev_id)
793 struct net_device *ndev = dev_id;
794 struct ravb_private *priv = netdev_priv(ndev);
795 irqreturn_t result = IRQ_NONE;
798 spin_lock(&priv->lock);
799 /* Get interrupt status */
800 iss = ravb_read(ndev, ISS);
802 /* Received and transmitted interrupts */
803 if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
806 /* Timestamp updated */
807 if (ravb_timestamp_interrupt(ndev))
808 result = IRQ_HANDLED;
810 /* Network control and best effort queue RX/TX */
811 for (q = RAVB_NC; q >= RAVB_BE; q--) {
812 if (ravb_queue_interrupt(ndev, q))
813 result = IRQ_HANDLED;
817 /* E-MAC status summary */
819 ravb_emac_interrupt_unlocked(ndev);
820 result = IRQ_HANDLED;
823 /* Error status summary */
825 ravb_error_interrupt(ndev);
826 result = IRQ_HANDLED;
829 /* gPTP interrupt status summary */
830 if (iss & ISS_CGIS) {
831 ravb_ptp_interrupt(ndev);
832 result = IRQ_HANDLED;
836 spin_unlock(&priv->lock);
840 /* Timestamp/Error/gPTP interrupt handler */
841 static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
843 struct net_device *ndev = dev_id;
844 struct ravb_private *priv = netdev_priv(ndev);
845 irqreturn_t result = IRQ_NONE;
848 spin_lock(&priv->lock);
849 /* Get interrupt status */
850 iss = ravb_read(ndev, ISS);
852 /* Timestamp updated */
853 if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
854 result = IRQ_HANDLED;
856 /* Error status summary */
858 ravb_error_interrupt(ndev);
859 result = IRQ_HANDLED;
862 /* gPTP interrupt status summary */
863 if (iss & ISS_CGIS) {
864 ravb_ptp_interrupt(ndev);
865 result = IRQ_HANDLED;
869 spin_unlock(&priv->lock);
873 static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
875 struct net_device *ndev = dev_id;
876 struct ravb_private *priv = netdev_priv(ndev);
877 irqreturn_t result = IRQ_NONE;
879 spin_lock(&priv->lock);
881 /* Network control/Best effort queue RX/TX */
882 if (ravb_queue_interrupt(ndev, q))
883 result = IRQ_HANDLED;
886 spin_unlock(&priv->lock);
890 static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
892 return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
895 static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
897 return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
900 static int ravb_poll(struct napi_struct *napi, int budget)
902 struct net_device *ndev = napi->dev;
903 struct ravb_private *priv = netdev_priv(ndev);
905 int q = napi - priv->napi;
911 tis = ravb_read(ndev, TIS);
912 ris0 = ravb_read(ndev, RIS0);
913 if (!((ris0 & mask) || (tis & mask)))
916 /* Processing RX Descriptor Ring */
918 /* Clear RX interrupt */
919 ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
920 if (ravb_rx(ndev, "a, q))
923 /* Processing TX Descriptor Ring */
925 spin_lock_irqsave(&priv->lock, flags);
926 /* Clear TX interrupt */
927 ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
928 ravb_tx_free(ndev, q, true);
929 netif_wake_subqueue(ndev, q);
931 spin_unlock_irqrestore(&priv->lock, flags);
937 /* Re-enable RX/TX interrupts */
938 spin_lock_irqsave(&priv->lock, flags);
939 if (priv->chip_id == RCAR_GEN2) {
940 ravb_modify(ndev, RIC0, mask, mask);
941 ravb_modify(ndev, TIC, mask, mask);
943 ravb_write(ndev, mask, RIE0);
944 ravb_write(ndev, mask, TIE);
947 spin_unlock_irqrestore(&priv->lock, flags);
949 /* Receive error message handling */
950 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
951 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
952 if (priv->rx_over_errors != ndev->stats.rx_over_errors)
953 ndev->stats.rx_over_errors = priv->rx_over_errors;
954 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
955 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
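/* Tell NAPI how much of the budget was consumed; ravb_rx() has
 * decremented quota once per received packet.
 */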
957 return budget - quota;
960 /* PHY state control function */
961 static void ravb_adjust_link(struct net_device *ndev)
963 struct ravb_private *priv = netdev_priv(ndev);
964 struct phy_device *phydev = ndev->phydev;
965 bool new_state = false;
968 spin_lock_irqsave(&priv->lock, flags);
970 /* Disable TX and RX right away if the E-MAC link change is ignored */
971 if (priv->no_avb_link)
972 ravb_rcv_snd_disable(ndev);
975 if (phydev->duplex != priv->duplex) {
977 priv->duplex = phydev->duplex;
978 ravb_set_duplex(ndev);
981 if (phydev->speed != priv->speed) {
983 priv->speed = phydev->speed;
987 ravb_modify(ndev, ECMR, ECMR_TXF, 0);
989 priv->link = phydev->link;
991 } else if (priv->link) {
998 /* Enable TX and RX right away if the E-MAC link change is ignored */
999 if (priv->no_avb_link && phydev->link)
1000 ravb_rcv_snd_enable(ndev);
1003 spin_unlock_irqrestore(&priv->lock, flags);
1005 if (new_state && netif_msg_link(priv))
1006 phy_print_status(phydev);
1009 static const struct soc_device_attribute r8a7795es10[] = {
1010 { .soc_id = "r8a7795", .revision = "ES1.0", },
1014 /* PHY init function */
1015 static int ravb_phy_init(struct net_device *ndev)
1017 struct device_node *np = ndev->dev.parent->of_node;
1018 struct ravb_private *priv = netdev_priv(ndev);
1019 struct phy_device *phydev;
1020 struct device_node *pn;
1027 /* Try connecting to PHY */
1028 pn = of_parse_phandle(np, "phy-handle", 0);
1030 /* In the case of a fixed PHY, the DT node associated
1031 * with the PHY is the Ethernet MAC DT node.
1033 if (of_phy_is_fixed_link(np)) {
1034 err = of_phy_register_fixed_link(np);
1038 pn = of_node_get(np);
1040 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
1041 priv->phy_interface);
1044 netdev_err(ndev, "failed to connect PHY\n");
1046 goto err_deregister_fixed_link;
1049 /* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
1052 if (soc_device_match(r8a7795es10)) {
1053 err = phy_set_max_speed(phydev, SPEED_100);
1055 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
1056 goto err_phy_disconnect;
1059 netdev_info(ndev, "limited PHY to 100Mbit/s\n");
1062 /* 10BASE is not supported */
1063 phydev->supported &= ~PHY_10BT_FEATURES;
1065 phy_attached_info(phydev);
1070 phy_disconnect(phydev);
1071 err_deregister_fixed_link:
1072 if (of_phy_is_fixed_link(np))
1073 of_phy_deregister_fixed_link(np);
1078 /* PHY control start function */
1079 static int ravb_phy_start(struct net_device *ndev)
1083 error = ravb_phy_init(ndev);
1087 phy_start(ndev->phydev);
1092 static int ravb_get_link_ksettings(struct net_device *ndev,
1093 struct ethtool_link_ksettings *cmd)
1095 struct ravb_private *priv = netdev_priv(ndev);
1096 unsigned long flags;
1101 spin_lock_irqsave(&priv->lock, flags);
1102 phy_ethtool_ksettings_get(ndev->phydev, cmd);
1103 spin_unlock_irqrestore(&priv->lock, flags);
1108 static int ravb_set_link_ksettings(struct net_device *ndev,
1109 const struct ethtool_link_ksettings *cmd)
1114 return phy_ethtool_ksettings_set(ndev->phydev, cmd);
1117 static int ravb_nway_reset(struct net_device *ndev)
1119 int error = -ENODEV;
1122 error = phy_start_aneg(ndev->phydev);
1127 static u32 ravb_get_msglevel(struct net_device *ndev)
1129 struct ravb_private *priv = netdev_priv(ndev);
1131 return priv->msg_enable;
1134 static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1136 struct ravb_private *priv = netdev_priv(ndev);
1138 priv->msg_enable = value;
1141 static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1142 "rx_queue_0_current",
1143 "tx_queue_0_current",
1146 "rx_queue_0_packets",
1147 "tx_queue_0_packets",
1150 "rx_queue_0_mcast_packets",
1151 "rx_queue_0_errors",
1152 "rx_queue_0_crc_errors",
1153 "rx_queue_0_frame_errors",
1154 "rx_queue_0_length_errors",
1155 "rx_queue_0_missed_errors",
1156 "rx_queue_0_over_errors",
1158 "rx_queue_1_current",
1159 "tx_queue_1_current",
1162 "rx_queue_1_packets",
1163 "tx_queue_1_packets",
1166 "rx_queue_1_mcast_packets",
1167 "rx_queue_1_errors",
1168 "rx_queue_1_crc_errors",
1169 "rx_queue_1_frame_errors",
1170 "rx_queue_1_length_errors",
1171 "rx_queue_1_missed_errors",
1172 "rx_queue_1_over_errors",
1175 #define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
1177 static int ravb_get_sset_count(struct net_device *netdev, int sset)
1181 return RAVB_STATS_LEN;
1187 static void ravb_get_ethtool_stats(struct net_device *ndev,
1188 struct ethtool_stats *stats, u64 *data)
1190 struct ravb_private *priv = netdev_priv(ndev);
1194 /* Device-specific stats */
1195 for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
1196 struct net_device_stats *stats = &priv->stats[q];
1198 data[i++] = priv->cur_rx[q];
1199 data[i++] = priv->cur_tx[q];
1200 data[i++] = priv->dirty_rx[q];
1201 data[i++] = priv->dirty_tx[q];
1202 data[i++] = stats->rx_packets;
1203 data[i++] = stats->tx_packets;
1204 data[i++] = stats->rx_bytes;
1205 data[i++] = stats->tx_bytes;
1206 data[i++] = stats->multicast;
1207 data[i++] = stats->rx_errors;
1208 data[i++] = stats->rx_crc_errors;
1209 data[i++] = stats->rx_frame_errors;
1210 data[i++] = stats->rx_length_errors;
1211 data[i++] = stats->rx_missed_errors;
1212 data[i++] = stats->rx_over_errors;
1216 static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1218 switch (stringset) {
1220 memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
1225 static void ravb_get_ringparam(struct net_device *ndev,
1226 struct ethtool_ringparam *ring)
1228 struct ravb_private *priv = netdev_priv(ndev);
1230 ring->rx_max_pending = BE_RX_RING_MAX;
1231 ring->tx_max_pending = BE_TX_RING_MAX;
1232 ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1233 ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1236 static int ravb_set_ringparam(struct net_device *ndev,
1237 struct ethtool_ringparam *ring)
1239 struct ravb_private *priv = netdev_priv(ndev);
1242 if (ring->tx_pending > BE_TX_RING_MAX ||
1243 ring->rx_pending > BE_RX_RING_MAX ||
1244 ring->tx_pending < BE_TX_RING_MIN ||
1245 ring->rx_pending < BE_RX_RING_MIN)
1247 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1250 if (netif_running(ndev)) {
1251 netif_device_detach(ndev);
1252 /* Stop PTP Clock driver */
1253 if (priv->chip_id == RCAR_GEN2)
1254 ravb_ptp_stop(ndev);
1255 /* Wait for DMA stopping */
1256 error = ravb_stop_dma(ndev);
1259 "cannot set ringparam! Any AVB processes are still running?\n");
1262 synchronize_irq(ndev->irq);
1264 /* Free all the skb's in the RX queue and the DMA buffers. */
1265 ravb_ring_free(ndev, RAVB_BE);
1266 ravb_ring_free(ndev, RAVB_NC);
1269 /* Set new parameters */
1270 priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1271 priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1273 if (netif_running(ndev)) {
1274 error = ravb_dmac_init(ndev);
1277 "%s: ravb_dmac_init() failed, error %d\n",
1282 ravb_emac_init(ndev);
1284 /* Initialise PTP Clock driver */
1285 if (priv->chip_id == RCAR_GEN2)
1286 ravb_ptp_init(ndev, priv->pdev);
1288 netif_device_attach(ndev);
1294 static int ravb_get_ts_info(struct net_device *ndev,
1295 struct ethtool_ts_info *info)
1297 struct ravb_private *priv = netdev_priv(ndev);
1299 info->so_timestamping =
1300 SOF_TIMESTAMPING_TX_SOFTWARE |
1301 SOF_TIMESTAMPING_RX_SOFTWARE |
1302 SOF_TIMESTAMPING_SOFTWARE |
1303 SOF_TIMESTAMPING_TX_HARDWARE |
1304 SOF_TIMESTAMPING_RX_HARDWARE |
1305 SOF_TIMESTAMPING_RAW_HARDWARE;
1306 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1308 (1 << HWTSTAMP_FILTER_NONE) |
1309 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1310 (1 << HWTSTAMP_FILTER_ALL);
1311 info->phc_index = ptp_clock_index(priv->ptp.clock);
1316 static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1318 struct ravb_private *priv = netdev_priv(ndev);
1324 wol->supported = WAKE_MAGIC;
1325 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1329 static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1331 struct ravb_private *priv = netdev_priv(ndev);
1333 if (!priv->clk || wol->wolopts & ~WAKE_MAGIC)
1336 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1338 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1343 static const struct ethtool_ops ravb_ethtool_ops = {
1344 .nway_reset = ravb_nway_reset,
1345 .get_msglevel = ravb_get_msglevel,
1346 .set_msglevel = ravb_set_msglevel,
1347 .get_link = ethtool_op_get_link,
1348 .get_strings = ravb_get_strings,
1349 .get_ethtool_stats = ravb_get_ethtool_stats,
1350 .get_sset_count = ravb_get_sset_count,
1351 .get_ringparam = ravb_get_ringparam,
1352 .set_ringparam = ravb_set_ringparam,
1353 .get_ts_info = ravb_get_ts_info,
1354 .get_link_ksettings = ravb_get_link_ksettings,
1355 .set_link_ksettings = ravb_set_link_ksettings,
1356 .get_wol = ravb_get_wol,
1357 .set_wol = ravb_set_wol,
1360 static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1361 struct net_device *ndev, struct device *dev,
1367 name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1370 error = request_irq(irq, handler, 0, name, ndev);
1372 netdev_err(ndev, "cannot request IRQ %s\n", name);
1377 /* Network device open function for Ethernet AVB */
1378 static int ravb_open(struct net_device *ndev)
1380 struct ravb_private *priv = netdev_priv(ndev);
1381 struct platform_device *pdev = priv->pdev;
1382 struct device *dev = &pdev->dev;
1385 napi_enable(&priv->napi[RAVB_BE]);
1386 napi_enable(&priv->napi[RAVB_NC]);
1388 if (priv->chip_id == RCAR_GEN2) {
1389 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1392 netdev_err(ndev, "cannot request IRQ\n");
1396 error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1400 error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1404 error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
1405 ndev, dev, "ch0:rx_be");
1407 goto out_free_irq_emac;
1408 error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
1409 ndev, dev, "ch18:tx_be");
1411 goto out_free_irq_be_rx;
1412 error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
1413 ndev, dev, "ch1:rx_nc");
1415 goto out_free_irq_be_tx;
1416 error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
1417 ndev, dev, "ch19:tx_nc");
1419 goto out_free_irq_nc_rx;
1423 error = ravb_dmac_init(ndev);
1425 goto out_free_irq_nc_tx;
1426 ravb_emac_init(ndev);
1428 /* Initialise PTP Clock driver */
1429 if (priv->chip_id == RCAR_GEN2)
1430 ravb_ptp_init(ndev, priv->pdev);
1432 netif_tx_start_all_queues(ndev);
1434 /* PHY control start */
1435 error = ravb_phy_start(ndev);
1442 /* Stop PTP Clock driver */
1443 if (priv->chip_id == RCAR_GEN2)
1444 ravb_ptp_stop(ndev);
1446 if (priv->chip_id == RCAR_GEN2)
1448 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1450 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1452 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1454 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1456 free_irq(priv->emac_irq, ndev);
1458 free_irq(ndev->irq, ndev);
1460 napi_disable(&priv->napi[RAVB_NC]);
1461 napi_disable(&priv->napi[RAVB_BE]);
1465 /* Timeout function for Ethernet AVB */
1466 static void ravb_tx_timeout(struct net_device *ndev)
1468 struct ravb_private *priv = netdev_priv(ndev);
1470 netif_err(priv, tx_err, ndev,
1471 "transmit timed out, status %08x, resetting...\n",
1472 ravb_read(ndev, ISS));
1474 /* Increment the tx_errors counter */
1475 ndev->stats.tx_errors++;
1477 schedule_work(&priv->work);
1480 static void ravb_tx_timeout_work(struct work_struct *work)
1482 struct ravb_private *priv = container_of(work, struct ravb_private,
1484 struct net_device *ndev = priv->ndev;
1487 netif_tx_stop_all_queues(ndev);
1489 /* Stop PTP Clock driver */
1490 if (priv->chip_id == RCAR_GEN2)
1491 ravb_ptp_stop(ndev);
1493 /* Wait for DMA stopping */
1494 if (ravb_stop_dma(ndev)) {
1495 /* If ravb_stop_dma() fails, the hardware is still operating
1496 * for TX and/or RX, so the functions below must not be called,
1497 * since ravb_dmac_init() could fail as well. Also, don't retry
1498 * ravb_stop_dma() over and over here, because that could wait
1499 * forever. So this just re-enables TX and RX and skips the
1500 * following re-initialization procedure.
1503 ravb_rcv_snd_enable(ndev);
1507 ravb_ring_free(ndev, RAVB_BE);
1508 ravb_ring_free(ndev, RAVB_NC);
1511 error = ravb_dmac_init(ndev);
1513 /* If ravb_dmac_init() fails, the descriptors have already been
1514 * freed, so return here to avoid re-enabling TX and RX in
1517 netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1521 ravb_emac_init(ndev);
1524 /* Initialise PTP Clock driver */
1525 if (priv->chip_id == RCAR_GEN2)
1526 ravb_ptp_init(ndev, priv->pdev);
1528 netif_tx_start_all_queues(ndev);
1531 /* Packet transmit function for Ethernet AVB */
1532 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1534 struct ravb_private *priv = netdev_priv(ndev);
1535 u16 q = skb_get_queue_mapping(skb);
1536 struct ravb_tstamp_skb *ts_skb;
1537 struct ravb_tx_desc *desc;
1538 unsigned long flags;
1544 spin_lock_irqsave(&priv->lock, flags);
1545 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1547 netif_err(priv, tx_queued, ndev,
1548 "still transmitting with the full ring!\n");
1549 netif_stop_subqueue(ndev, q);
1550 spin_unlock_irqrestore(&priv->lock, flags);
1551 return NETDEV_TX_BUSY;
1554 if (skb_put_padto(skb, ETH_ZLEN))
1557 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
1558 priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
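/* Each frame uses NUM_TX_DESC descriptors: the first points at a
 * DPTR_ALIGN-aligned copy of the start of the data placed in the
 * per-entry tx_align bounce buffer, the second maps the remainder of
 * the skb data directly.
 */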
1560 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1561 entry / NUM_TX_DESC * DPTR_ALIGN;
1562 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1563 /* Zero length DMA descriptors are problematic as they seem to
1564 * terminate DMA transfers. Avoid them by simply using a length of
1565 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
1567 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
1568 * data by the call to skb_put_padto() above, this is safe with
1569 * respect to both the length of the first DMA descriptor (len)
1570 * overflowing the available data and the length of the second DMA
1571 * descriptor (skb->len - len) being negative.
1576 memcpy(buffer, skb->data, len);
1577 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
1578 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1581 desc = &priv->tx_ring[q][entry];
1582 desc->ds_tagl = cpu_to_le16(len);
1583 desc->dptr = cpu_to_le32(dma_addr);
1585 buffer = skb->data + len;
1586 len = skb->len - len;
1587 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
1588 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1592 desc->ds_tagl = cpu_to_le16(len);
1593 desc->dptr = cpu_to_le32(dma_addr);
1595 /* TX timestamp required */
1597 ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
1600 dma_unmap_single(ndev->dev.parent, dma_addr, len,
1604 ts_skb->skb = skb_get(skb);
1605 ts_skb->tag = priv->ts_skb_tag++;
1606 priv->ts_skb_tag &= 0x3ff;
1607 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
1609 /* TAG and timestamp required flag */
1610 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1611 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
1612 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
1615 skb_tx_timestamp(skb);
1616 /* Descriptor type must be set after all the above writes */
1618 desc->die_dt = DT_FEND;
1620 desc->die_dt = DT_FSTART;
1622 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
1624 priv->cur_tx[q] += NUM_TX_DESC;
1625 if (priv->cur_tx[q] - priv->dirty_tx[q] >
1626 (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
1627 !ravb_tx_free(ndev, q, true))
1628 netif_stop_subqueue(ndev, q);
1632 spin_unlock_irqrestore(&priv->lock, flags);
1633 return NETDEV_TX_OK;
1636 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
1637 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
1639 dev_kfree_skb_any(skb);
1640 priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
1644 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
1645 void *accel_priv, select_queue_fallback_t fallback)
1647 /* If the skb needs a TX timestamp, it is handled in the network control queue */
1648 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
1653 static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
1655 struct ravb_private *priv = netdev_priv(ndev);
1656 struct net_device_stats *nstats, *stats0, *stats1;
1658 nstats = &ndev->stats;
1659 stats0 = &priv->stats[RAVB_BE];
1660 stats1 = &priv->stats[RAVB_NC];
1662 nstats->tx_dropped += ravb_read(ndev, TROCR);
1663 ravb_write(ndev, 0, TROCR); /* (write clear) */
1664 nstats->collisions += ravb_read(ndev, CDCR);
1665 ravb_write(ndev, 0, CDCR); /* (write clear) */
1666 nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
1667 ravb_write(ndev, 0, LCCR); /* (write clear) */
1669 nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
1670 ravb_write(ndev, 0, CERCR); /* (write clear) */
1671 nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
1672 ravb_write(ndev, 0, CEECR); /* (write clear) */
1674 nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
1675 nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
1676 nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
1677 nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
1678 nstats->multicast = stats0->multicast + stats1->multicast;
1679 nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
1680 nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
1681 nstats->rx_frame_errors =
1682 stats0->rx_frame_errors + stats1->rx_frame_errors;
1683 nstats->rx_length_errors =
1684 stats0->rx_length_errors + stats1->rx_length_errors;
1685 nstats->rx_missed_errors =
1686 stats0->rx_missed_errors + stats1->rx_missed_errors;
1687 nstats->rx_over_errors =
1688 stats0->rx_over_errors + stats1->rx_over_errors;
1693 /* Update promiscuous bit */
1694 static void ravb_set_rx_mode(struct net_device *ndev)
1696 struct ravb_private *priv = netdev_priv(ndev);
1697 unsigned long flags;
1699 spin_lock_irqsave(&priv->lock, flags);
1700 ravb_modify(ndev, ECMR, ECMR_PRM,
1701 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
1703 spin_unlock_irqrestore(&priv->lock, flags);
1706 /* Device close function for Ethernet AVB */
1707 static int ravb_close(struct net_device *ndev)
1709 struct device_node *np = ndev->dev.parent->of_node;
1710 struct ravb_private *priv = netdev_priv(ndev);
1711 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
1713 netif_tx_stop_all_queues(ndev);
1715 /* Disable interrupts by clearing the interrupt masks. */
1716 ravb_write(ndev, 0, RIC0);
1717 ravb_write(ndev, 0, RIC2);
1718 ravb_write(ndev, 0, TIC);
1720 /* Stop PTP Clock driver */
1721 if (priv->chip_id == RCAR_GEN2)
1722 ravb_ptp_stop(ndev);
1724 /* Set the config mode to stop the AVB-DMAC's processes */
1725 if (ravb_stop_dma(ndev) < 0)
1727 "device will be stopped after h/w processes are done.\n");
1729 /* Clear the timestamp list */
1730 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
1731 list_del(&ts_skb->list);
1732 kfree_skb(ts_skb->skb);
1736 /* PHY disconnect */
1738 phy_stop(ndev->phydev);
1739 phy_disconnect(ndev->phydev);
1740 if (of_phy_is_fixed_link(np))
1741 of_phy_deregister_fixed_link(np);
1744 if (priv->chip_id != RCAR_GEN2) {
1745 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1746 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1747 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1748 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1749 free_irq(priv->emac_irq, ndev);
1751 free_irq(ndev->irq, ndev);
1753 napi_disable(&priv->napi[RAVB_NC]);
1754 napi_disable(&priv->napi[RAVB_BE]);
1756 /* Free all the skb's in the RX queue and the DMA buffers. */
1757 ravb_ring_free(ndev, RAVB_BE);
1758 ravb_ring_free(ndev, RAVB_NC);
1763 static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
1765 struct ravb_private *priv = netdev_priv(ndev);
1766 struct hwtstamp_config config;
1769 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
1771 switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
1772 case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
1773 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1775 case RAVB_RXTSTAMP_TYPE_ALL:
1776 config.rx_filter = HWTSTAMP_FILTER_ALL;
1779 config.rx_filter = HWTSTAMP_FILTER_NONE;
1782 return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1786 /* Control hardware time stamping */
1787 static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
1789 struct ravb_private *priv = netdev_priv(ndev);
1790 struct hwtstamp_config config;
1791 u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
1794 if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1797 /* Reserved for future extensions */
1801 switch (config.tx_type) {
1802 case HWTSTAMP_TX_OFF:
1805 case HWTSTAMP_TX_ON:
1806 tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
1812 switch (config.rx_filter) {
1813 case HWTSTAMP_FILTER_NONE:
1816 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1817 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
1820 config.rx_filter = HWTSTAMP_FILTER_ALL;
1821 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
1824 priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1825 priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1827 return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1831 /* ioctl to device function */
1832 static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1834 struct phy_device *phydev = ndev->phydev;
1836 if (!netif_running(ndev))
1844 return ravb_hwtstamp_get(ndev, req);
1846 return ravb_hwtstamp_set(ndev, req);
1849 return phy_mii_ioctl(phydev, req, cmd);
1852 static const struct net_device_ops ravb_netdev_ops = {
1853 .ndo_open = ravb_open,
1854 .ndo_stop = ravb_close,
1855 .ndo_start_xmit = ravb_start_xmit,
1856 .ndo_select_queue = ravb_select_queue,
1857 .ndo_get_stats = ravb_get_stats,
1858 .ndo_set_rx_mode = ravb_set_rx_mode,
1859 .ndo_tx_timeout = ravb_tx_timeout,
1860 .ndo_do_ioctl = ravb_do_ioctl,
1861 .ndo_validate_addr = eth_validate_addr,
1862 .ndo_set_mac_address = eth_mac_addr,
1865 /* MDIO bus init function */
1866 static int ravb_mdio_init(struct ravb_private *priv)
1868 struct platform_device *pdev = priv->pdev;
1869 struct device *dev = &pdev->dev;
1873 priv->mdiobb.ops = &bb_ops;
1875 /* MII controller setting */
1876 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1880 /* Hook up MII support for ethtool */
1881 priv->mii_bus->name = "ravb_mii";
1882 priv->mii_bus->parent = dev;
1883 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1884 pdev->name, pdev->id);
1886 /* Register MDIO bus */
1887 error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1894 free_mdio_bitbang(priv->mii_bus);
1898 /* MDIO bus release function */
1899 static int ravb_mdio_release(struct ravb_private *priv)
1901 /* Unregister mdio bus */
1902 mdiobus_unregister(priv->mii_bus);
1904 /* Free bitbang info */
1905 free_mdio_bitbang(priv->mii_bus);
1910 static const struct of_device_id ravb_match_table[] = {
1911 { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
1912 { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
1913 { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
1914 { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
1915 { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
1918 MODULE_DEVICE_TABLE(of, ravb_match_table);
1920 static int ravb_set_gti(struct net_device *ndev)
1923 struct device *dev = ndev->dev.parent;
1924 struct device_node *np = dev->of_node;
1929 clk = of_clk_get(np, 0);
1931 dev_err(dev, "could not get clock\n");
1932 return PTR_ERR(clk);
1935 rate = clk_get_rate(clk);
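/* GTI.TIV is the gPTP timer increment per clock cycle, expressed
 * with 20 fractional bits: inc = 10^9 * 2^20 / rate, range-checked
 * against GTI_TIV_MIN/MAX below.
 */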
1941 inc = 1000000000ULL << 20;
1944 if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
1945 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
1946 inc, GTI_TIV_MIN, GTI_TIV_MAX);
1950 ravb_write(ndev, inc, GTI);
1955 static void ravb_set_config_mode(struct net_device *ndev)
1957 struct ravb_private *priv = netdev_priv(ndev);
1959 if (priv->chip_id == RCAR_GEN2) {
1960 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
1961 /* Set CSEL value */
1962 ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
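/* The Gen3 path below additionally sets CCC_GAC so the gPTP
 * (AVB-DMAC) timer stays active while in config mode, with
 * CCC_CSEL_HPB presumably selecting the HPB clock as its source.
 */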
1964 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
1965 CCC_GAC | CCC_CSEL_HPB);
1969 /* Set TX and RX clock internal delay modes */
1970 static void ravb_set_delay_mode(struct net_device *ndev)
1972 struct ravb_private *priv = netdev_priv(ndev);
1975 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1976 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
1979 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1980 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
1983 ravb_modify(ndev, APSR, APSR_DM, set);
1986 static int ravb_probe(struct platform_device *pdev)
1988 struct device_node *np = pdev->dev.of_node;
1989 struct ravb_private *priv;
1990 enum ravb_chip_id chip_id;
1991 struct net_device *ndev;
1993 struct resource *res;
1998 "this driver is required to be instantiated from device tree\n");
2002 /* Get base address */
2003 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2005 dev_err(&pdev->dev, "invalid resource\n");
2009 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2010 NUM_TX_QUEUE, NUM_RX_QUEUE);
2014 pm_runtime_enable(&pdev->dev);
2015 pm_runtime_get_sync(&pdev->dev);
2017 /* The Ether-specific entries in the device structure. */
2018 ndev->base_addr = res->start;
2020 chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
2022 if (chip_id == RCAR_GEN3)
2023 irq = platform_get_irq_byname(pdev, "ch22");
2025 irq = platform_get_irq(pdev, 0);
2032 SET_NETDEV_DEV(ndev, &pdev->dev);
2034 priv = netdev_priv(ndev);
2037 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2038 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2039 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2040 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2041 priv->addr = devm_ioremap_resource(&pdev->dev, res);
2042 if (IS_ERR(priv->addr)) {
2043 error = PTR_ERR(priv->addr);
2047 spin_lock_init(&priv->lock);
2048 INIT_WORK(&priv->work, ravb_tx_timeout_work);
2050 priv->phy_interface = of_get_phy_mode(np);
2052 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
2053 priv->avb_link_active_low =
2054 of_property_read_bool(np, "renesas,ether-link-active-low");
2056 if (chip_id == RCAR_GEN3) {
2057 irq = platform_get_irq_byname(pdev, "ch24");
2062 priv->emac_irq = irq;
2063 for (i = 0; i < NUM_RX_QUEUE; i++) {
2064 irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
2069 priv->rx_irqs[i] = irq;
2071 for (i = 0; i < NUM_TX_QUEUE; i++) {
2072 irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
2077 priv->tx_irqs[i] = irq;
2081 priv->chip_id = chip_id;
2083 /* Get clock; if not found that's OK, but Wake-on-LAN is unavailable */
2084 priv->clk = devm_clk_get(&pdev->dev, NULL);
2085 if (IS_ERR(priv->clk))
2089 ndev->netdev_ops = &ravb_netdev_ops;
2090 ndev->ethtool_ops = &ravb_ethtool_ops;
2092 /* Set AVB config mode */
2093 ravb_set_config_mode(ndev);
2096 error = ravb_set_gti(ndev);
2100 /* Request GTI loading */
2101 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2103 if (priv->chip_id != RCAR_GEN2)
2104 ravb_set_delay_mode(ndev);
2106 /* Allocate descriptor base address table */
2107 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
2108 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
2109 &priv->desc_bat_dma, GFP_KERNEL);
2110 if (!priv->desc_bat) {
2112 "Cannot allocate desc base address table (size %d bytes)\n",
2113 priv->desc_bat_size);
2117 for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
2118 priv->desc_bat[q].die_dt = DT_EOS;
2119 ravb_write(ndev, priv->desc_bat_dma, DBAT);
2121 /* Initialise HW timestamp list */
2122 INIT_LIST_HEAD(&priv->ts_skb_list);
2124 /* Initialise PTP Clock driver */
2125 if (chip_id != RCAR_GEN2)
2126 ravb_ptp_init(ndev, pdev);
2128 /* Debug message level */
2129 priv->msg_enable = RAVB_DEF_MSG_ENABLE;
2131 /* Read and set MAC address */
2132 ravb_read_mac_address(ndev, of_get_mac_address(np));
2133 if (!is_valid_ether_addr(ndev->dev_addr)) {
2134 dev_warn(&pdev->dev,
2135 "no valid MAC address supplied, using a random one\n");
2136 eth_hw_addr_random(ndev);
2140 error = ravb_mdio_init(priv);
2142 dev_err(&pdev->dev, "failed to initialize MDIO\n");
2146 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
2147 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
2149 /* Network device register */
2150 error = register_netdev(ndev);
2155 device_set_wakeup_capable(&pdev->dev, 1);
2157 /* Print device information */
2158 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
2159 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2161 platform_set_drvdata(pdev, ndev);
2166 netif_napi_del(&priv->napi[RAVB_NC]);
2167 netif_napi_del(&priv->napi[RAVB_BE]);
2168 ravb_mdio_release(priv);
2170 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2171 priv->desc_bat_dma);
2173 /* Stop PTP Clock driver */
2174 if (chip_id != RCAR_GEN2)
2175 ravb_ptp_stop(ndev);
2180 pm_runtime_put(&pdev->dev);
2181 pm_runtime_disable(&pdev->dev);
2185 static int ravb_remove(struct platform_device *pdev)
2187 struct net_device *ndev = platform_get_drvdata(pdev);
2188 struct ravb_private *priv = netdev_priv(ndev);
2190 /* Stop PTP Clock driver */
2191 if (priv->chip_id != RCAR_GEN2)
2192 ravb_ptp_stop(ndev);
2194 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2195 priv->desc_bat_dma);
2196 /* Set reset mode */
2197 ravb_write(ndev, CCC_OPC_RESET, CCC);
2198 pm_runtime_put_sync(&pdev->dev);
2199 unregister_netdev(ndev);
2200 netif_napi_del(&priv->napi[RAVB_NC]);
2201 netif_napi_del(&priv->napi[RAVB_BE]);
2202 ravb_mdio_release(priv);
2203 pm_runtime_disable(&pdev->dev);
2205 platform_set_drvdata(pdev, NULL);
2210 static int ravb_wol_setup(struct net_device *ndev)
2212 struct ravb_private *priv = netdev_priv(ndev);
2214 /* Disable interrupts by clearing the interrupt masks. */
2215 ravb_write(ndev, 0, RIC0);
2216 ravb_write(ndev, 0, RIC2);
2217 ravb_write(ndev, 0, TIC);
2219 /* Only allow ECI interrupts */
2220 synchronize_irq(priv->emac_irq);
2221 napi_disable(&priv->napi[RAVB_NC]);
2222 napi_disable(&priv->napi[RAVB_BE]);
2223 ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
2225 /* Enable MagicPacket */
2226 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
2228 /* Increase the clock usage count so the device won't be suspended */
2229 clk_enable(priv->clk);
2231 return enable_irq_wake(priv->emac_irq);
2234 static int ravb_wol_restore(struct net_device *ndev)
2236 struct ravb_private *priv = netdev_priv(ndev);
2239 napi_enable(&priv->napi[RAVB_NC]);
2240 napi_enable(&priv->napi[RAVB_BE]);
2242 /* Disable MagicPacket */
2243 ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
2245 ret = ravb_close(ndev);
2249 /* Restore clock usage count */
2250 clk_disable(priv->clk);
2252 return disable_irq_wake(priv->emac_irq);
2255 static int __maybe_unused ravb_suspend(struct device *dev)
2257 struct net_device *ndev = dev_get_drvdata(dev);
2258 struct ravb_private *priv = netdev_priv(ndev);
2261 if (!netif_running(ndev))
2264 netif_device_detach(ndev);
2266 if (priv->wol_enabled)
2267 ret = ravb_wol_setup(ndev);
2269 ret = ravb_close(ndev);
2274 static int __maybe_unused ravb_resume(struct device *dev)
2276 struct net_device *ndev = dev_get_drvdata(dev);
2277 struct ravb_private *priv = netdev_priv(ndev);
2280 if (priv->wol_enabled) {
2281 /* Reduce the usecount of the clock to zero and then
2282 * restore it to its original value. This is done to force
2283 * the clock to be re-enabled, which is a workaround
2284 * for the renesas-cpg-mssr driver, which does not enable clocks
2285 * when resuming from PSCI suspend.
2287 * Without this workaround the driver fails to communicate
2288 * with the hardware if WoL was enabled when the system
2289 * entered PSCI suspend. This is because, if WoL is enabled, we
2290 * explicitly keep the clock from being turned off when
2291 * suspending, but in PSCI sleep power is cut, so the clock
2292 * is disabled anyway. The clock driver is not aware of this,
2293 * so the clock is not turned back on when resuming.
2295 * TODO: once the renesas-cpg-mssr suspend/resume is working
2296 * this clock dance should be removed.
2298 clk_disable(priv->clk);
2299 clk_disable(priv->clk);
2300 clk_enable(priv->clk);
2301 clk_enable(priv->clk);
2303 /* Set reset mode to rearm the WoL logic */
2304 ravb_write(ndev, CCC_OPC_RESET, CCC);
2307 /* All registers have been reset to their default values.
2308 * Restore all registers that were set up at probe time and
2309 * reopen the device if it was running before the system suspended.
2312 /* Set AVB config mode */
2313 ravb_set_config_mode(ndev);
2316 ret = ravb_set_gti(ndev);
2320 /* Request GTI loading */
2321 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2323 if (priv->chip_id != RCAR_GEN2)
2324 ravb_set_delay_mode(ndev);
2326 /* Restore descriptor base address table */
2327 ravb_write(ndev, priv->desc_bat_dma, DBAT);
2329 if (netif_running(ndev)) {
2330 if (priv->wol_enabled) {
2331 ret = ravb_wol_restore(ndev);
2335 ret = ravb_open(ndev);
2338 netif_device_attach(ndev);
2344 static int __maybe_unused ravb_runtime_nop(struct device *dev)
2346 /* Runtime PM callback shared between ->runtime_suspend()
2347 * and ->runtime_resume(). Simply returns success.
2349 * This driver re-initializes all registers after
2350 * pm_runtime_get_sync() anyway so there is no need
2351 * to save and restore registers here.
2356 static const struct dev_pm_ops ravb_dev_pm_ops = {
2357 SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
2358 SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
2361 static struct platform_driver ravb_driver = {
2362 .probe = ravb_probe,
2363 .remove = ravb_remove,
2366 .pm = &ravb_dev_pm_ops,
2367 .of_match_table = ravb_match_table,
2371 module_platform_driver(ravb_driver);
2373 MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
2374 MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
2375 MODULE_LICENSE("GPL v2");