/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014 Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2016 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"
#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)
#define SH_ETH_OFFSET_INVALID	((u16)~0)
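/* Each per-SoC register-offset table below is sparse: every slot is first
 * filled with SH_ETH_OFFSET_INVALID by the GNU range designator in
 * SH_ETH_OFFSET_DEFAULTS, and only the registers that exist on that SoC
 * are then overridden with a real offset.
 */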
#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
};
static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST]	= 0x0004,
	[TSU_FWSLC]	= 0x0038,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
};
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,
};
static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,
};
static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
};
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}
static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->addr + offset);
}
static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
			  u32 set)
{
	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
		     enum_index);
}
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}
static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}
static void sh_eth_select_mii(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 value;

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not setup. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
	mdelay(1);
}
static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}
/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_FAST_RZ,

	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= 0xe77f009f,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,

	.rpadir_value	= 2 << 16,
};
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	sh_eth_chip_reset(ndev);

	sh_eth_select_mii(ndev);
}
/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.rpadir_value	= 2 << 16,
};
/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
		break;
	case 100: /* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
		break;
	}
}
/* R8A7778/9 */
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.fdr_value	= 0x00000f0f,
};
/* R8A7790/1 */
static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.fdr_value	= 0x00000f0f,

	.trscer_err_mask = DESC_I_RINT8,
};
#endif /* CONFIG_OF */
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
		break;
	case 100: /* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
		break;
	}
}
/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}
/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
	.rpadir_value	= 2 << 16,
};
#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	u32 mahr[2], malr[2];
	int i;

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	sh_eth_chip_reset(ndev);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}
static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.rpadir_value	= 2 << 16,
};
/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
};
/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
};
static struct sh_eth_cpu_data sh7619_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
static struct sh_eth_cpu_data sh771x_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static int sh_eth_check_reset(struct net_device *ndev)
{
	int cnt = 100;

	/* poll until the soft-reset bit self-clears */
	while (cnt-- > 0) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			return 0;
		mdelay(1);
	}

	netdev_err(ndev, "Device reset failed\n");
	return -ETIMEDOUT;
}
static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			return ret;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
		mdelay(3);
		sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
	}

	return ret;
}
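/* Rx buffers handed to the DMA engine must start on a SH_ETH_RX_ALIGN
 * boundary; the helper below pushes skb->data forward to the next such
 * boundary when netdev_alloc_skb() returns a misaligned buffer.
 */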
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
/* Get the MAC address from the SuperH MAC address registers.
 *
 * The SuperH Ethernet controller has no ROM for the MAC address; this
 * driver picks up whatever address the bootloader (U-Boot or sh-ipl+g)
 * programmed. To use this device, a MAC address must be set in the
 * bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);

		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
		ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
		ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
		ndev->dev_addr[4] = (malr >> 8) & 0xFF;
		ndev->dev_addr[5] = (malr >> 0) & 0xFF;
	}
}
static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}
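/* MDIO is bit-banged through the single PIR (PHY interface) register;
 * the mdio-bitbang core calls back into the ops below to wiggle the
 * clock (MDC), direction (MMD) and data (MDO/MDI) bits.
 */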
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
};
static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (set)
		pir |= mask;
	else
		pir &= ~mask;
	iowrite32(pir, bitbang->addr);
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
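/* Tx ring bookkeeping: cur_tx counts descriptors handed to the hardware
 * and dirty_tx counts descriptors reclaimed; both increase monotonically
 * and are reduced modulo num_tx_ring to get a ring index, so their
 * difference is the number of descriptors still in flight.
 */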
/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;
	bool sent;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
		if (sent_only && !sent)
			break;
		/* TACT bit must be checked before all the following reads */
		dma_rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, le32_to_cpu(txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
					 le32_to_cpu(txdesc->len) >> 16,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_le32(TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_le32(TD_TDLE);

		if (sent) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
		}
	}
	return free_num;
}
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	if (mdp->rx_ring) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i]) {
				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];

				dma_unmap_single(&ndev->dev,
						 le32_to_cpu(rxdesc->addr),
						 ALIGN(mdp->rx_buf_sz, 32),
						 DMA_FROM_DEVICE);
			}
		}
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	if (mdp->tx_ring) {
		sh_eth_tx_free(ndev, false);

		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}

	/* Free Tx skb ringbuffer */
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}
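/* Ring setup: every Rx descriptor gets a freshly allocated, DMA-mapped
 * skb and is handed to the hardware by setting RD_RACT; the last
 * descriptor in each ring is flagged (RD_RDLE/TD_TDLE) so the
 * controller wraps back to the start of the ring.
 */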
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u32 buf_len;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* The size of the buffer is a multiple of 32 bytes. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_le32(buf_len << 16);
		rxdesc->addr = cpu_to_le32(dma_addr);
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
		rxdesc->status |= cpu_to_le32(RD_RDLE);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_le32(TD_TDLE);
}
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
	if (!mdp->rx_skbuff)
		return -ENOMEM;

	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
	if (!mdp->tx_skbuff)
		goto ring_free;

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring)
		goto ring_free;

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring)
		goto ring_free;
	return 0;

ring_free:
	/* Free Rx and Tx skb ring buffer and DMA buffer */
	sh_eth_ring_free(ndev);

	return -ENOMEM;
}
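/* Bring-up order below: soft-reset the controller, lay out the
 * descriptor rings, program DMA/FIFO parameters, unmask interrupts,
 * then enable the E-MAC transmitter/receiver and kick Rx DMA.
 */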
static int sh_eth_dev_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_modify(ndev, EESR, 0, 0);
	mdp->irq_enabled = true;
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
		     ECMR_TE | ECMR_RE, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	return ret;
}
static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	sh_eth_reset(ndev);

	/* Set the RMII mode again if required */
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Set MAC address again */
	update_mac_address(ndev);
}
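/* NAPI receive path: the loop below walks descriptors until it finds
 * one still owned by the hardware (RD_RACT set), bounded by boguscnt
 * (the smaller of the NAPI quota and the number of descriptors the
 * driver has outstanding), then refills the ring and restarts Rx DMA
 * if the controller stopped on descriptor exhaustion.
 */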
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 buf_len;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		dma_rmb();
		desc_status = le32_to_cpu(rxdesc->status);
		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740 and R7S72100
		 * the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->shift_rd0)
			desc_status >>= 16;

		skb = mdp->rx_skbuff[entry];
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else if (skb) {
			dma_addr = le32_to_cpu(rxdesc->addr);
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(dma_addr, 4)),
					pkt_len + 2);
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&ndev->dev, dma_addr,
					 ALIGN(mdp->rx_buf_sz, 32),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
				ndev->stats.multicast++;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 32 byte boundary. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		rxdesc->len = cpu_to_le32(buf_len << 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&ndev->dev, skb->data,
						  buf_len, DMA_FROM_DEVICE);
			if (dma_mapping_error(&ndev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = cpu_to_le32(dma_addr);
		}
		dma_wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
		else
			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE &&
		    mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
			/* each descriptor is 16 bytes, so the RDFAR-RDLAR
			 * difference gives the current descriptor index
			 */
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				goto ignore_link;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				sh_eth_rcv_snd_disable(ndev);
			} else {
				/* Link Up */
				sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
				/* clear int */
				sh_eth_modify(ndev, ECSR, 0, 0);
				sh_eth_modify(ndev, EESIPR, DMAC_M_ECI,
					      DMAC_M_ECI);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

ignore_link:
	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_tx_free(ndev, true);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing ECI interrupt to be always
	 * enabled since it's the one that comes thru regardless of the mask,
	 * and we need to fully handle it in sh_eth_error() in order to quench
	 * it as it doesn't get cleared by just writing 1 to the ECI bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | DMAC_M_ECI;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto out;

	if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
	}

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
				    intr_status, intr_enable);
		}
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_tx_free(ndev, true);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

out:
	spin_unlock(&mdp->lock);

	return ret;
}
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	u32 intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&mdp->lock, flags);

	/* Disable TX and RX right over here, if E-MAC change is ignored */
	if (mdp->cd->no_psr || mdp->no_ether_link)
		sh_eth_rcv_snd_disable(ndev);

	if (phydev->link) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (!mdp->link) {
			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = 0;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	/* Enable TX and RX right over here, if E-MAC change is ignored */
	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
		sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev;

	mdp->link = 0;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	if (np) {
		struct device_node *pn;

		pn = of_parse_phandle(np, "phy-handle", 0);
		phydev = of_phy_connect(ndev, pn,
					sh_eth_adjust_link, 0,
					mdp->phy_interface);

		of_node_put(pn);
		if (!phydev)
			phydev = ERR_PTR(-ENOENT);
	} else {
		char phy_id[MII_BUS_ID_SIZE + 3];

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 mdp->mii_bus->id, mdp->phy_id);

		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				     mdp->phy_interface);
	}

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "failed to connect PHY\n");
		return PTR_ERR(phydev);
	}

	phy_attached_info(phydev);

	return 0;
}
/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	phy_start(ndev->phydev);

	return 0;
}
static int sh_eth_get_link_ksettings(struct net_device *ndev,
				     struct ethtool_link_ksettings *cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	if (!ndev->phydev)
		return -ENODEV;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_ksettings_get(ndev->phydev, cmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
static int sh_eth_set_link_ksettings(struct net_device *ndev,
				     const struct ethtool_link_ksettings *cmd)
{
	if (!ndev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}
/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
 * version must be bumped as well.  Just adding registers up to that
 * limit is fine, as long as the existing register indices don't
 * change.
 */
#define SH_ETH_REG_DUMP_VERSION		1
#define SH_ETH_REG_DUMP_MAX_REGS	256
static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	u32 *valid_map;
	size_t len;

	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);

	/* Dump starts with a bitmap that tells ethtool which
	 * registers are defined for this chip.
	 */
	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
	if (buf) {
		valid_map = buf;
		buf += len;
	} else {
		valid_map = NULL;
	}

	/* Add a register to the dump, if it has a defined offset.
	 * This automatically skips most undefined registers, but for
	 * some it is also necessary to check a capability flag in
	 * struct sh_eth_cpu_data.
	 */
#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
#define add_reg_from(reg, read_expr) do {				\
		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
			if (buf) {					\
				mark_reg_valid(reg);			\
				*buf++ = read_expr;			\
			}						\
			++len;						\
		}							\
	} while (0)
#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))

	if (cd->tsu) {
		add_tsu_reg(TSU_CTRST);
		add_tsu_reg(TSU_FWEN0);
		add_tsu_reg(TSU_FWEN1);
		add_tsu_reg(TSU_FCM);
		add_tsu_reg(TSU_BSYSL0);
		add_tsu_reg(TSU_BSYSL1);
		add_tsu_reg(TSU_PRISL0);
		add_tsu_reg(TSU_PRISL1);
		add_tsu_reg(TSU_FWSL0);
		add_tsu_reg(TSU_FWSL1);
		add_tsu_reg(TSU_FWSLC);
		add_tsu_reg(TSU_QTAG0);
		add_tsu_reg(TSU_QTAG1);
		add_tsu_reg(TSU_QTAGM0);
		add_tsu_reg(TSU_QTAGM1);
		add_tsu_reg(TSU_FWSR);
		add_tsu_reg(TSU_FWINMK);
		add_tsu_reg(TSU_ADQT0);
		add_tsu_reg(TSU_ADQT1);
		add_tsu_reg(TSU_VTAG0);
		add_tsu_reg(TSU_VTAG1);
		add_tsu_reg(TSU_ADSBSY);
		add_tsu_reg(TSU_TEN);
		add_tsu_reg(TSU_POST1);
		add_tsu_reg(TSU_POST2);
		add_tsu_reg(TSU_POST3);
		add_tsu_reg(TSU_POST4);
		if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
			/* This is the start of a table, not just a single
			 * register.
			 */
			if (buf) {
				unsigned int i;

				mark_reg_valid(TSU_ADRH0);
				for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
					*buf++ = ioread32(
						mdp->tsu_addr +
						mdp->reg_offset[TSU_ADRH0] +
						i * 4);
			}
			len += SH_ETH_TSU_CAM_ENTRIES * 2;
		}
	}

#undef mark_reg_valid
#undef add_reg_from
#undef add_reg
#undef add_tsu_reg

	return len * 4;
}
static int sh_eth_get_regs_len(struct net_device *ndev)
{
	return __sh_eth_get_regs(ndev, NULL);
}
static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
			    void *buf)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	regs->version = SH_ETH_REG_DUMP_VERSION;

	pm_runtime_get_sync(&mdp->pdev->dev);
	__sh_eth_get_regs(ndev, buf);
	pm_runtime_put_sync(&mdp->pdev->dev);
}
static int sh_eth_nway_reset(struct net_device *ndev)
{
	if (!ndev->phydev)
		return -ENODEV;

	return phy_start_aneg(ndev->phydev);
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}
static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}
static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return SH_ETH_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void sh_eth_get_ethtool_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i = 0;

	/* device-specific stats */
	data[i++] = mdp->cur_rx;
	data[i++] = mdp->cur_tx;
	data[i++] = mdp->dirty_rx;
	data[i++] = mdp->dirty_tx;
}
static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));
		break;
	}
}
static void sh_eth_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ring->rx_max_pending = RX_RING_MAX;
	ring->tx_max_pending = TX_RING_MAX;
	ring->rx_pending = mdp->num_rx_ring;
	ring->tx_pending = mdp->num_tx_ring;
}
static int sh_eth_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	if (ring->tx_pending > TX_RING_MAX ||
	    ring->rx_pending > RX_RING_MAX ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_pending < RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		netif_tx_disable(ndev);

		/* Serialise with the interrupt handler and NAPI, then
		 * disable interrupts.  We have to clear the
		 * irq_enabled flag first to ensure that interrupts
		 * won't be re-enabled.
		 */
		mdp->irq_enabled = false;
		synchronize_irq(ndev->irq);
		napi_synchronize(&mdp->napi);
		sh_eth_write(ndev, 0x0000, EESIPR);

		sh_eth_dev_exit(ndev);

		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
		sh_eth_ring_free(ndev);
	}

	/* Set new parameters */
	mdp->num_rx_ring = ring->rx_pending;
	mdp->num_tx_ring = ring->tx_pending;

	if (netif_running(ndev)) {
		ret = sh_eth_ring_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
				   __func__);
			return ret;
		}
		ret = sh_eth_dev_init(ndev);
		if (ret < 0) {
			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
				   __func__);
			return ret;
		}

		netif_device_attach(ndev);
	}

	return 0;
}
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_regs_len	= sh_eth_get_regs_len,
	.get_regs	= sh_eth_get_regs,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
	.get_ringparam	= sh_eth_get_ringparam,
	.set_ringparam	= sh_eth_set_ringparam,
	.get_link_ksettings = sh_eth_get_link_ksettings,
	.set_link_ksettings = sh_eth_set_link_ksettings,
};
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(&mdp->pdev->dev);

	napi_enable(&mdp->napi);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		netdev_err(ndev, "Can not assign IRQ number\n");
		goto out_napi_off;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	netif_start_queue(ndev);

	mdp->is_opened = 1;

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&mdp->napi);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}
/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	netif_err(mdp, timer, ndev,
		  "transmit timed out, status %8.8x, resetting...\n",
		  sh_eth_read(ndev, EESR));

	/* tx_errors count up */
	ndev->stats.tx_errors++;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = cpu_to_le32(0);
		rxdesc->addr = cpu_to_le32(0xBADF00D0);
		dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < mdp->num_tx_ring; i++) {
		dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	netif_start_queue(ndev);
}
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	dma_addr_t dma_addr;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
		if (!sh_eth_tx_free(ndev, true)) {
			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	entry = mdp->cur_tx % mdp->num_tx_ring;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
	dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, dma_addr)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	txdesc->addr = cpu_to_le32(dma_addr);
	txdesc->len = cpu_to_le32(skb->len << 16);

	dma_wmb(); /* TACT bit must be set after all the above writes */
	if (entry >= mdp->num_tx_ring - 1)
		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_le32(TD_TACT);

	wmb(); /* cur_tx must be incremented after TACT bit was set */
	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
/* The statistics registers have write-clear behaviour, which means we
 * will lose any increment between the read and write.  We mitigate
 * this by only clearing when we read a non-zero value, so we will
 * never falsely report a total of zero.
 */
static void
sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
{
	u32 delta = sh_eth_read(ndev, reg);

	if (delta) {
		*stat += delta;
		sh_eth_write(ndev, 0, reg);
	}
}
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (sh_eth_is_rz_fast_ether(mdp))
		return &ndev->stats;

	if (!mdp->is_opened)
		return &ndev->stats;

	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);

	if (sh_eth_is_gether(mdp)) {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CERCR);
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CEECR);
	} else {
		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
				   CNDCR);
	}

	return &ndev->stats;
}
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	netif_stop_queue(ndev);

	/* Serialise with the interrupt handler and NAPI, then disable
	 * interrupts.  We have to clear the irq_enabled flag first to
	 * ensure that interrupts won't be re-enabled.
	 */
	mdp->irq_enabled = false;
	synchronize_irq(ndev->irq);
	napi_disable(&mdp->napi);
	sh_eth_write(ndev, 0x0000, EESIPR);

	sh_eth_dev_exit(ndev);

	/* PHY Disconnect */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
	sh_eth_ring_free(ndev);

	mdp->is_opened = 0;

	pm_runtime_put(&mdp->pdev->dev);

	return 0;
}
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
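/* Each TSU_POSTn register packs eight 4-bit fields, one per CAM entry
 * (entry 0 occupies the top nibble). Within a nibble, the bits select
 * which port(s) accept frames matching that CAM entry; the helpers
 * below compute the register, the nibble mask, and this port's bit.
 * For example, port 0 / entry 0 maps to bit 31 (0x8 << 28).
 */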
/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}
static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}
static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}
static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If the other port still enables this entry, return "true" */
	return tmp & ref_mask;
}
static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			netdev_err(ndev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}
static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}
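/* The CAM table starts at TSU_ADRH0 and stores one MAC address per
 * 8 bytes (the high 4 bytes in TSU_ADRHn, the low 2 in TSU_ADRLn),
 * which is why the lookups below step reg_offset by 8 per entry.
 */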
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (ether_addr_equal(addr, c_addr))
			return i;
	}

	return -ENOENT;
}
static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ? -ENOMEM : entry;
}
static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int ret;
	u8 blank[ETH_ALEN];

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
	if (ret < 0)
		return ret;
	return 0;
}
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0) {
		/* No entry found, create one */
		i = sh_eth_tsu_find_empty(ndev);
		if (i < 0)
			return -ENOMEM;
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
		if (ret < 0)
			return ret;

		/* Enable the entry */
		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
				 (1 << (31 - i)), TSU_TEN);
	}

	/* Entry found or created, enable POST */
	sh_eth_tsu_enable_cam_entry_post(ndev, i);

	return 0;
}
static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	i = sh_eth_tsu_find_entry(ndev, addr);
	if (i < 0)
		goto done;

	if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
		goto done;

	/* Disable the entry if both ports were disabled */
	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
	if (ret < 0)
		return ret;
done:
	return 0;
}
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (!mdp->cd->tsu)
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (!mdp->cd->tsu)
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}
/* Update promiscuous flag and multicast filter */
static void sh_eth_set_rx_mode(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
	if (mdp->cd->tsu)
		ecmr_bits |= ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}
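/* The TSU provides a single VLAN tag filter per port (TSU_VTAG0 for
 * port 0, TSU_VTAG1 for port 1), so only one VID can be filtered in
 * hardware; adding a second VID simply disables the filter.
 */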
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
				  __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/* The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled, the driver disables it and the filter re-selects
	 * a new candidate.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
				   __be16 proto, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	if (sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
				 TSU_FWSLC);	/* Enable POST registers */
		return;
	}

	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct sh_eth_private *mdp)
{
	/* unregister mdio bus */
	mdiobus_unregister(mdp->mii_bus);

	/* free bitbang info */
	free_mdio_bitbang(mdp->mii_bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct sh_eth_private *mdp,
			struct sh_eth_plat_data *pd)
{
	int ret;
	struct bb_info *bitbang;
	struct platform_device *pdev = mdp->pdev;
	struct device *dev = &mdp->pdev->dev;

	/* create bit control struct for PHY */
	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang)
		return -ENOMEM;

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* register MDIO bus: use the device tree node if available,
	 * otherwise fall back to platform data for the PHY IRQ
	 */
	if (dev->of_node) {
		ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
	} else {
		if (pd->phy_irq > 0)
			mdp->mii_bus->irq[pd->phy] = pd->phy_irq;

		ret = mdiobus_register(mdp->mii_bus);
	}

	if (ret)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);
	return ret;
}

static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_RZ:
		reg_offset = sh_eth_offset_fast_rz;
		break;
	case SH_ETH_REG_FAST_RCAR:
		reg_offset = sh_eth_offset_fast_rcar;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	}

	return reg_offset;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static const struct net_device_ops sh_eth_netdev_ops_tsu = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

#ifdef CONFIG_OF
static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct sh_eth_plat_data *pdata;
	const char *mac_addr;
	int ret;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	ret = of_get_phy_mode(np);
	if (ret < 0)
		return NULL;
	pdata->phy_interface = ret;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);

	pdata->no_ether_link =
		of_property_read_bool(np, "renesas,no-ether-link");
	pdata->ether_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	return pdata;
}

static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7743", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7745", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
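
/* Illustrative device tree node for one of the compatibles above.  The unit
 * address, interrupt specifier and PHY details are placeholders rather than
 * values taken from a real board file:
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7791";
 *		reg = <0 0xee700000 0 0x400>;
 *		interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
 *		phy-mode = "rmii";
 *		phy-handle = <&phy1>;
 *		renesas,ether-link-active-low;
 *	};
 */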
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);
	struct sh_eth_private *mdp;
	struct net_device *ndev;
	int ret, devno;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto out_release;
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	ndev->base_addr = res->start;

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	if (id)
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	else
		mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);

	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "no TSU resource\n");
			ret = -ENODEV;
			goto out_release;
		}
		/* We can only request the TSU region for the first port
		 * of the two sharing this TSU for the probe to succeed...
		 */
		if (devno % 2 == 0 &&
		    !devm_request_mem_region(&pdev->dev, rtsu->start,
					     resource_size(rtsu),
					     dev_name(&pdev->dev))) {
			dev_err(&pdev->dev, "can't request TSU resource.\n");
			ret = -EBUSY;
			goto out_release;
		}
		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
					     resource_size(rtsu));
		if (!mdp->tsu_addr) {
			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
			ret = -ENOMEM;
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* Need to init only the first port of the two sharing a TSU */
	if (devno % 2 == 0) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only)*/
			sh_eth_tsu_init(mdp);
		}
	}

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialise MDIO\n");
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int sh_eth_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ret = sh_eth_close(ndev);
	}

	return ret;
}

static int sh_eth_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = sh_eth_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}
#endif

static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
};

#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif

static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
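
/* In the non-DT case a board file binds one of the IDs above by registering
 * a platform device.  A minimal sketch: the device name, addresses, IRQ and
 * PHY id below are illustrative only, not taken from a real board:
 *
 *	static struct sh_eth_plat_data sh_eth_pdata = {
 *		.phy		= 0x1f,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *	};
 *
 *	static struct resource sh_eth_resources[] = {
 *		DEFINE_RES_MEM(0xa4600000, 0x200),
 *		DEFINE_RES_IRQ(91),
 *	};
 *
 *	static struct platform_device sh_eth_device = {
 *		.name		= "sh7724-ether",
 *		.id		= 0,
 *		.resource	= sh_eth_resources,
 *		.num_resources	= ARRAY_SIZE(sh_eth_resources),
 *		.dev		= { .platform_data = &sh_eth_pdata },
 *	};
 *
 *	platform_device_register(&sh_eth_device);
 */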

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");