1 // SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2018-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */
10 #define HW_ATL_PTP_DISABLE_MSK BIT(10)
12 bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
17 err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
18 val, val == 0U, 10U, 100000U);
26 u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr)
28 u16 phy_addr = aq_hw->phy_id << 5 | mmd;
30 /* Set Address register. */
31 hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
32 HW_ATL_MDIO_ADDRESS_SHIFT);
33 /* Send Address command. */
34 hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
35 (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
36 ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
37 HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
39 aq_mdio_busy_wait(aq_hw);
41 /* Send Read command. */
42 hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
43 (1 << HW_ATL_MDIO_OP_MODE_SHIFT) |
44 ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
45 HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
47 aq_mdio_busy_wait(aq_hw);
49 return (u16)hw_atl_glb_mdio_iface5_get(aq_hw);
52 void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data)
54 u16 phy_addr = aq_hw->phy_id << 5 | mmd;
56 /* Set Address register. */
57 hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
58 HW_ATL_MDIO_ADDRESS_SHIFT);
59 /* Send Address command. */
60 hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
61 (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
62 ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
63 HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
65 aq_mdio_busy_wait(aq_hw);
67 hw_atl_glb_mdio_iface3_set(aq_hw, (data & HW_ATL_MDIO_WRITE_DATA_MSK) <<
68 HW_ATL_MDIO_WRITE_DATA_SHIFT);
69 /* Send Write command. */
70 hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
71 (2 << HW_ATL_MDIO_OP_MODE_SHIFT) |
72 ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
73 HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
75 aq_mdio_busy_wait(aq_hw);
78 u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address)
83 err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
84 val, val == 1U, 10U, 100000U);
91 err = aq_mdio_read_word(aq_hw, mmd, address);
93 hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
99 void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data)
104 err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
105 val, val == 1U, 10U, 100000U);
109 aq_mdio_write_word(aq_hw, mmd, address, data);
110 hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
113 bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw)
117 for (aq_hw->phy_id = 0; aq_hw->phy_id < HW_ATL_PHY_ID_MAX;
119 /* PMA Standard Device Identifier 2: Address 1.3 */
120 val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
129 bool aq_phy_init(struct aq_hw_s *aq_hw)
133 if (aq_hw->phy_id == HW_ATL_PHY_ID_MAX)
134 if (!aq_phy_init_phy_id(aq_hw))
137 /* PMA Standard Device Identifier:
141 dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
143 dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
145 if (dev_id == 0xffffffff) {
146 aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
153 void aq_phy_disable_ptp(struct aq_hw_s *aq_hw)
155 static const u16 ptp_registers[] = {
164 for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) {
165 val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1,
168 aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1,
170 val & ~HW_ATL_PTP_DISABLE_MSK);