/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014 Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID	((u16)~0)
#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
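/* Each per-SoC register map below starts from SH_ETH_OFFSET_DEFAULTS, so
 * every register that is not explicitly listed keeps the "invalid" offset,
 * and a stray access to it is caught by the WARN_ON() checks in
 * sh_eth_read()/sh_eth_write() below.
 */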
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST] = 0x0004,
	[TSU_FWEN0] = 0x0010,
	[TSU_FWEN1] = 0x0014,
	[TSU_BSYSL0] = 0x0020,
	[TSU_BSYSL1] = 0x0024,
	[TSU_PRISL0] = 0x0028,
	[TSU_PRISL1] = 0x002c,
	[TSU_FWSL0] = 0x0030,
	[TSU_FWSL1] = 0x0034,
	[TSU_FWSLC] = 0x0038,
	[TSU_QTAG0] = 0x0040,
	[TSU_QTAG1] = 0x0044,
	[TSU_FWINMK] = 0x0054,
	[TSU_ADQT0] = 0x0048,
	[TSU_ADQT1] = 0x004c,
	[TSU_VTAG0] = 0x0058,
	[TSU_VTAG1] = 0x005c,
	[TSU_ADSBSY] = 0x0060,
	[TSU_POST1] = 0x0070,
	[TSU_POST2] = 0x0074,
	[TSU_POST3] = 0x0078,
	[TSU_POST4] = 0x007c,
	[TSU_ADRH0] = 0x0100,
};
static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST] = 0x0004,
	[TSU_VTAG0] = 0x0058,
	[TSU_ADSBSY] = 0x0060,
	[TSU_ADRH0] = 0x0100,
};
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,
};
static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST] = 0x0004,
	[TSU_FWEN0] = 0x0010,
	[TSU_FWEN1] = 0x0014,
	[TSU_BSYSL0] = 0x0020,
	[TSU_BSYSL1] = 0x0024,
	[TSU_PRISL0] = 0x0028,
	[TSU_PRISL1] = 0x002c,
	[TSU_FWSL0] = 0x0030,
	[TSU_FWSL1] = 0x0034,
	[TSU_FWSLC] = 0x0038,
	[TSU_QTAGM0] = 0x0040,
	[TSU_QTAGM1] = 0x0044,
	[TSU_ADQT0] = 0x0048,
	[TSU_ADQT1] = 0x004c,
	[TSU_FWINMK] = 0x0054,
	[TSU_ADSBSY] = 0x0060,
	[TSU_POST1] = 0x0070,
	[TSU_POST2] = 0x0074,
	[TSU_POST3] = 0x0078,
	[TSU_POST4] = 0x007c,
	[TSU_ADRH0] = 0x0100,
};
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}

static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0;

	return ioread32(mdp->addr + offset);
}
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}

static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_r8a777x,

	.register_type = SH_ETH_REG_FAST_RCAR,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.fdr_value = 0x00000f0f,
};

static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_r8a777x,

	.register_type = SH_ETH_REG_FAST_RCAR,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.fdr_value = 0x00000f0f,

	.trscer_err_mask = DESC_I_RINT8,
};
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_sh7724,

	.register_type = SH_ETH_REG_FAST_SH4,

	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = 0x01ff009f,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_sh7757,

	.register_type = SH_ETH_REG_FAST_SH4,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags = IRQF_SHARED,
	.rpadir_value = 2 << 16,
};
#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	u32 mahr[2], malr[2];
	int i;

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset = sh_eth_chip_reset_giga,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_giga,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000072f,

	.irq_flags = IRQF_SHARED,
	.rpadir_value = 2 << 16,
};
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
};

static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags = IRQF_SHARED,
};
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset = sh_eth_chip_reset_r8a7740,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate_gether,

	.register_type = SH_ETH_REG_GIGABIT,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000070f,

	.rpadir_value = 2 << 16,
};
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,

	.register_type = SH_ETH_REG_FAST_RZ,

	.ecsr_value = ECSR_ICD,
	.ecsipr_value = ECSIPR_ICDIP,
	.eesipr_value = 0xe77f009f,

	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value = 0x0000070f,

	.rpadir_value = 2 << 16,
};

static struct sh_eth_cpu_data sh7619_data = {
	.register_type = SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};

static struct sh_eth_cpu_data sh771x_data = {
	.register_type = SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		netdev_err(ndev, "Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			return ret;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

	return ret;
}
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
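/* Worked example, assuming SH_ETH_RX_ALIGN is 32: if skb->data ends in
 * 0x...04, reserve == 4 and skb_reserve(skb, 28) pushes the buffer up to
 * the next 32-byte boundary; an already-aligned buffer is left alone.
 */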
/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}
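/* The descriptor endianness is a property of the EDMAC wiring, not of the
 * CPU, so descriptor fields are converted explicitly. For example, on a
 * little-endian CPU with EDMAC_BIG_ENDIAN, cpu_to_edmac(mdp, 0x12345678)
 * yields the byte-swapped value 0x78563412.
 */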
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
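/* Example: 02:00:11:22:33:44 is programmed as MAHR = 0x02001122 and
 * MALR = 0x00003344 (first four octets in the high register, last two in
 * the low one).
 */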
/* Get MAC address from SuperH MAC address register
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). To use this device, the MAC address must be set
 * in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk;/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++)
			dev_kfree_skb(mdp->tx_skbuff[i]);
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u32 buf_len;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* The size of the buffer is a multiple of 32 bytes. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
		rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
		rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->len = cpu_to_edmac(mdp, 0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;
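	/* Worked example: with the default MTU of 1500 the second branch is
	 * taken: ((1500 + 26 + 7) & ~7) == 1528, plus 2 + 16 gives a
	 * 1546-byte buffer, plus NET_IP_ALIGN when RX padding is available.
	 */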

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
	if (!mdp->rx_skbuff)
		return -ENOMEM;

	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
	if (!mdp->tx_skbuff)
		goto ring_free;

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring)
		goto ring_free;

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring)
		goto ring_free;
	return 0;

ring_free:
	/* Free Rx and Tx skb ring buffer and DMA buffer */
	sh_eth_ring_free(ndev);

	return -ENOMEM;
}
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start) {
		mdp->irq_enabled = true;
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
	}

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

	return ret;
}
static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
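	/* (A maximum-size frame is ~1538 octets including preamble and
	 * inter-frame gap; at 10 Mbps that is roughly 1.2 ms on the wire,
	 * so the 2 ms sleep above comfortably covers it.)
	 */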
	sh_eth_get_stats(ndev);
	sh_eth_reset(ndev);

	/* Set the RMII mode again if required */
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Set MAC address again */
	update_mac_address(ndev);
}
/* free Tx skb function */
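/* cur_tx and dirty_tx are free-running counters: cur_tx - dirty_tx is the
 * number of descriptors still outstanding, and "counter % num_tx_ring"
 * maps a counter value onto its ring slot.
 */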
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* TACT bit must be checked before all the following reads */
		dma_rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, edmac_to_cpu(mdp, txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev,
					 edmac_to_cpu(mdp, txdesc->addr),
					 edmac_to_cpu(mdp, txdesc->len) >> 16,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16;
	}
	return free_num;
}
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 buf_len;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		dma_rmb();
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740 and R7S72100
		 * the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->shift_rd0)
			desc_status >>= 16;

		skb = mdp->rx_skbuff[entry];
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else if (skb) {
			dma_addr = edmac_to_cpu(mdp, rxdesc->addr);
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(dma_addr, 4)),
					pkt_len + 2);
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&ndev->dev, dma_addr,
					 ALIGN(mdp->rx_buf_sz, 32),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
				ndev->stats.multicast++;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 32 byte boundary. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&ndev->dev, skb->data,
						  buf_len, DMA_FROM_DEVICE);
			if (dma_mapping_error(&ndev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
		}
		dma_wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
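		/* RDFAR points at the next descriptor the EDMAC would use
		 * and RDLAR at the ring base; each descriptor is 16 bytes,
		 * so the byte difference shifted right by 4 is the ring
		 * index to resume from.
		 */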
		if (intr_status & EESR_RDE &&
		    mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
		~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
		(ECMR_RE | ECMR_TE), ECMR);
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
						   ~DMAC_M_ECI, EESIPR);
				/* clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					     ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
						   DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_error() in order to quench
	 * it as it doesn't get cleared by just writing 1 to the ECI bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | DMAC_M_ECI;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto other_irq;

	if (!likely(mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto other_irq;
	}

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
				    intr_status, intr_enable);
		}
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	u32 intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_write(ndev,
				     sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
				     ECMR);
			new_state = 1;
			mdp->link = phydev->link;
			if (mdp->cd->no_psr || mdp->no_ether_link)
				sh_eth_rcv_snd_enable(ndev);
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
		if (mdp->cd->no_psr || mdp->no_ether_link)
			sh_eth_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	if (np) {
		struct device_node *pn;

		pn = of_parse_phandle(np, "phy-handle", 0);
		phydev = of_phy_connect(ndev, pn,
					sh_eth_adjust_link, 0,
					mdp->phy_interface);

		if (!phydev)
			phydev = ERR_PTR(-ENOENT);
	} else {
		char phy_id[MII_BUS_ID_SIZE + 3];

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 mdp->mii_bus->id, mdp->phy_id);

		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				     mdp->phy_interface);
	}

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "failed to connect PHY\n");
		return PTR_ERR(phydev);
	}

	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
		    phydev->addr, phydev->irq, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	phy_start(mdp->phydev);

	return 0;
}
static int sh_eth_get_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!mdp->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
			       struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!mdp->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
 * version must be bumped as well. Just adding registers up to that
 * limit is fine, as long as the existing register indices don't
 * change.
 */
#define SH_ETH_REG_DUMP_VERSION		1
#define SH_ETH_REG_DUMP_MAX_REGS	256

static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	u32 *valid_map;
	size_t len;

	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);

	/* Dump starts with a bitmap that tells ethtool which
	 * registers are defined for this chip.
	 */
	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
	if (buf) {
		valid_map = buf;
		buf += len;
	} else {
		valid_map = NULL;
	}

	/* Add a register to the dump, if it has a defined offset.
	 * This automatically skips most undefined registers, but for
	 * some it is also necessary to check a capability flag in
	 * struct sh_eth_cpu_data.
	 */
#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
#define add_reg_from(reg, read_expr) do {				\
		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
			if (buf) {					\
				mark_reg_valid(reg);			\
				*buf++ = read_expr;			\
			}						\
			++len;						\
		}							\
	} while (0)
#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
	if (cd->tsu) {
		add_tsu_reg(TSU_CTRST);
		add_tsu_reg(TSU_FWEN0);
		add_tsu_reg(TSU_FWEN1);
		add_tsu_reg(TSU_FCM);
		add_tsu_reg(TSU_BSYSL0);
		add_tsu_reg(TSU_BSYSL1);
		add_tsu_reg(TSU_PRISL0);
		add_tsu_reg(TSU_PRISL1);
		add_tsu_reg(TSU_FWSL0);
		add_tsu_reg(TSU_FWSL1);
		add_tsu_reg(TSU_FWSLC);
		add_tsu_reg(TSU_QTAG0);
		add_tsu_reg(TSU_QTAG1);
		add_tsu_reg(TSU_QTAGM0);
		add_tsu_reg(TSU_QTAGM1);
		add_tsu_reg(TSU_FWSR);
		add_tsu_reg(TSU_FWINMK);
		add_tsu_reg(TSU_ADQT0);
		add_tsu_reg(TSU_ADQT1);
		add_tsu_reg(TSU_VTAG0);
		add_tsu_reg(TSU_VTAG1);
		add_tsu_reg(TSU_ADSBSY);
		add_tsu_reg(TSU_TEN);
		add_tsu_reg(TSU_POST1);
		add_tsu_reg(TSU_POST2);
		add_tsu_reg(TSU_POST3);
		add_tsu_reg(TSU_POST4);
		if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
			/* This is the start of a table, not just a single
			 * register.
			 */
			if (buf) {
				unsigned int i;

				mark_reg_valid(TSU_ADRH0);
				for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
					*buf++ = ioread32(
						mdp->tsu_addr +
						mdp->reg_offset[TSU_ADRH0] +
						i * 4);
			}
			len += SH_ETH_TSU_CAM_ENTRIES * 2;
		}
	}

#undef mark_reg_valid
#undef add_reg_from
#undef add_reg
#undef add_tsu_reg

	return len * 4;
}
static int sh_eth_get_regs_len(struct net_device *ndev)
{
	return __sh_eth_get_regs(ndev, NULL);
}

static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
			    void *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	regs->version = SH_ETH_REG_DUMP_VERSION;

	pm_runtime_get_sync(&mdp->pdev->dev);
	__sh_eth_get_regs(ndev, buf);
	pm_runtime_put_sync(&mdp->pdev->dev);
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!mdp->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}

static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}
static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		netif_tx_disable(ndev);

		/* Serialise with the interrupt handler and NAPI, then
		 * disable interrupts. We have to clear the
		 * irq_enabled flag first to ensure that interrupts
		 * won't be re-enabled.
		 */
		mdp->irq_enabled = false;
		synchronize_irq(ndev->irq);
		napi_synchronize(&mdp->napi);
		sh_eth_write(ndev, 0x0000, EESIPR);

		sh_eth_dev_exit(ndev);

		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
		sh_eth_ring_free(ndev);
	}

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	if (netif_running(ndev)) {
		ret = sh_eth_ring_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
				   __func__);
			return ret;
		}
		ret = sh_eth_dev_init(ndev, false);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
				   __func__);
			return ret;
		}

		mdp->irq_enabled = true;
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_device_attach(ndev);
	}

	return 0;
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings = sh_eth_get_settings,
	.set_settings = sh_eth_set_settings,
	.get_regs_len = sh_eth_get_regs_len,
	.get_regs = sh_eth_get_regs,
	.nway_reset = sh_eth_nway_reset,
	.get_msglevel = sh_eth_get_msglevel,
	.set_msglevel = sh_eth_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = sh_eth_get_strings,
	.get_ethtool_stats = sh_eth_get_ethtool_stats,
	.get_sset_count = sh_eth_get_sset_count,
	.get_ringparam = sh_eth_get_ringparam,
	.set_ringparam = sh_eth_set_ringparam,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	napi_enable(&mdp->napi);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		netdev_err(ndev, "Can not assign IRQ number\n");
		goto out_napi_off;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev, true);
	if (ret)
		goto out_free_irq;

	/* PHY control start*/
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	mdp->is_opened = 1;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&mdp->napi);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	netif_err(mdp, timer, ndev,
		  "transmit timed out, status %8.8x, resetting...\n",
		  sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = cpu_to_edmac(mdp, 0);
		rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0);
		dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	dma_addr_t dma_addr;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_txfree(ndev)) {
			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
	dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, dma_addr)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	txdesc->addr = cpu_to_edmac(mdp, dma_addr);
	txdesc->len = cpu_to_edmac(mdp, skb->len << 16);

	dma_wmb(); /* TACT bit must be set after all the above writes */
	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	wmb(); /* cur_tx must be incremented after TACT bit was set */
	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
/* The statistics registers have write-clear behaviour, which means we
 * will lose any increment between the read and write. We mitigate
 * this by only clearing when we read a non-zero value, so we will
 * never falsely report a total of zero.
 */
static void
sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
{
	u32 delta = sh_eth_read(ndev, reg);

	if (delta) {
		*stat += delta;
		sh_eth_write(ndev, 0, reg);
	}
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (sh_eth_is_rz_fast_ether(mdp))
		return &ndev->stats;

	if (!mdp->is_opened)
		return &ndev->stats;

	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);

	if (sh_eth_is_gether(mdp)) {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CERCR);
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CEECR);
	} else {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CNDCR);
	}

	return &ndev->stats;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Serialise with the interrupt handler and NAPI, then disable
	 * interrupts. We have to clear the irq_enabled flag first to
	 * ensure that interrupts won't be re-enabled.
	 */
	mdp->irq_enabled = false;
	synchronize_irq(ndev->irq);
	napi_disable(&mdp->napi);
	sh_eth_write(ndev, 0x0000, EESIPR);

	sh_eth_dev_exit(ndev);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
		mdp->phydev = NULL;
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
	sh_eth_ring_free(ndev);

	mdp->is_opened = 0;

	pm_runtime_put(&mdp->pdev->dev);

	return 0;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
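/* Worked example: CAM entry 5 uses TSU_POST1 (5 / 8 == 0) and the nibble
 * at bits 11:8 (28 - 5 % 8 * 4 == 8). Within that nibble the enable bit
 * is 0x08 for port 0 and 0x02 for port 1.
 */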
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If the other port is still enabled, return true */
	return tmp & ref_mask;
}

static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			netdev_err(ndev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}

static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (ether_addr_equal(addr, c_addr))
			return i;
	}

	return -ENOENT;
}

static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];
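	/* TSU_TEN maps CAM entry N to bit (31 - N), so entry 0 is the MSB. */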
	/* De-activate the entry if specified */
	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0)
		goto done;

	if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
		goto done;

	/* Disable the entry if both ports were disabled */
	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
	if (ret < 0)
		return ret;
done:
	return 0;
}

static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (!mdp->cd->tsu)
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}

/* Update promiscuous flag and multicast filter */
static void sh_eth_set_rx_mode(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
	if (mdp->cd->tsu)
		ecmr_bits |= ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
2816 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2824 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2825 __be16 proto, u16 vid)
2827 struct sh_eth_private *mdp = netdev_priv(ndev);
2828 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2830 if (unlikely(!mdp->cd->tsu))
2833 /* No filtering if vid = 0 */
2837 mdp->vlan_num_ids++;
2839 /* The controller has one VLAN tag HW filter. So, if the filter is
2840 * already enabled, the driver disables it and the filte
2842 if (mdp->vlan_num_ids > 1) {
2843 /* disable VLAN filter */
2844 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2848 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
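/* Net effect of the two VLAN handlers above: with exactly one VID
 * registered, TSU_VTAGn holds TSU_VTAG_ENABLE | vid and the hardware
 * filters on that tag; once a second VID is added (vlan_num_ids > 1)
 * the register is cleared and every VLAN tag is accepted, because the
 * TSU provides only a single VLAN-tag filter per port.
 */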
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	if (sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
		return;
	}

	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}
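/* On the RZ/A1 (fast_rz) variant only a subset of the TSU registers is
 * implemented, so its init path reduces to clearing the CAM enable bits
 * in TSU_TEN; the forwarding, QTAG and POST setup applies to the other
 * TSU-capable parts.  (Inferred from the register tables and the code
 * above, not from a datasheet.)
 */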
/* MDIO bus release function */
static int sh_mdio_release(struct sh_eth_private *mdp)
{
	/* unregister mdio bus */
	mdiobus_unregister(mdp->mii_bus);

	/* free bitbang info */
	free_mdio_bitbang(mdp->mii_bus);

	return 0;
}
/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct platform_device *pdev = mdp->pdev;
	struct device *dev = &mdp->pdev->dev;

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang)
		return -ENOMEM;

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = PIR_MDI;
	bitbang->mdo_msk = PIR_MDO;
	bitbang->mmd_msk = PIR_MMD;
	bitbang->mdc_msk = PIR_MDC;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* PHY IRQ */
	mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
					       GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	/* register MDIO bus */
	if (dev->of_node) {
		ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			mdp->mii_bus->irq[i] = PHY_POLL;
		if (pd->phy_irq > 0)
			mdp->mii_bus->irq[pd->phy] = pd->phy_irq;

		ret = mdiobus_register(mdp->mii_bus);
	}

	if (ret)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);
	return ret;
}
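/* The bus registered above is bit-banged: alloc_mdio_bitbang() wraps
 * bb_ops (defined earlier in this file), and the generic mdio-bitbang
 * layer toggles the PIR register lines (PIR_MDC clock, PIR_MDO output
 * data, PIR_MMD direction, PIR_MDI input data) to build MDIO frames in
 * software.
 */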
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RZ:
		reg_offset = sh_eth_offset_fast_rz;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	}

	return reg_offset;
}
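/* Each table returned above maps the common register enum onto the
 * SoC-specific MMIO offset; slots left at SH_ETH_OFFSET_INVALID mark
 * registers the variant lacks.  A register access therefore looks
 * roughly like this (sketch only, not the driver's actual accessor):
 *
 *	u16 offset = mdp->reg_offset[reg];
 *
 *	if (offset != SH_ETH_OFFSET_INVALID)
 *		iowrite32(data, mdp->addr + offset);
 */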
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
#ifdef CONFIG_OF
static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct sh_eth_plat_data *pdata;
	const char *mac_addr;
	int ret;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	ret = of_get_phy_mode(np);
	if (ret < 0)
		return NULL;
	pdata->phy_interface = ret;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);

	pdata->no_ether_link =
		of_property_read_bool(np, "renesas,no-ether-link");
	pdata->ether_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	return pdata;
}
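/* An illustrative device-tree node for the parser above (unit address,
 * register range and PHY label are made-up examples):
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7791";
 *		reg = <0 0xee700000 0 0x400>;
 *		phy-mode = "rmii";
 *		phy-handle = <&phy1>;
 *		renesas,ether-link-active-low;
 *	};
 */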
static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto out_release;
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	ndev->base_addr = res->start;

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	/* set cpu data */
	if (id) {
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	} else {
		const struct of_device_id *match;

		match = of_match_device(of_match_ptr(sh_eth_match_table),
					&pdev->dev);
		mdp->cd = (struct sh_eth_cpu_data *)match->data;
	}
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);
	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}
	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "no TSU resource\n");
			ret = -ENODEV;
			goto out_release;
		}
		/* We can only request the TSU region for the first port
		 * of the two sharing this TSU for the probe to succeed...
		 */
		if (devno % 2 == 0 &&
		    !devm_request_mem_region(&pdev->dev, rtsu->start,
					     resource_size(rtsu),
					     dev_name(&pdev->dev))) {
			dev_err(&pdev->dev, "can't request TSU resource.\n");
			ret = -EBUSY;
			goto out_release;
		}
		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
					     resource_size(rtsu));
		if (!mdp->tsu_addr) {
			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
			ret = -ENOMEM;
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	/* Need to init only the first port of the two sharing a TSU */
	if (devno % 2 == 0) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (init only) */
			sh_eth_tsu_init(mdp);
		}
	}
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialise MDIO\n");
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return 0;
out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
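/* The error unwinding above mirrors the setup order: out_napi_del
 * (reached when register_netdev() fails) deletes the NAPI context and
 * releases the MDIO bus, then falls through to out_release, which
 * frees the net_device and drops the runtime-PM reference taken at
 * the top of probe.
 */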
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int sh_eth_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ret = sh_eth_close(ndev);
	}

	return ret;
}
static int sh_eth_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = sh_eth_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}
#endif
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}
static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif
static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};
module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");