// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>

#include "t7xx_cldma.h"
28 void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info)
32 val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
33 val |= IP_BUSY_WAKEUP;
34 iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
38 * t7xx_cldma_hw_restore() - Restore CLDMA HW registers.
39 * @hw_info: Pointer to struct t7xx_cldma_hw.
41 * Restore HW after resume. Writes uplink configuration for CLDMA HW.
43 void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info)
47 ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
48 ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
50 if (hw_info->hw_mode == MODE_BIT_64)
51 ul_cfg |= UL_CFG_BIT_MODE_64;
52 else if (hw_info->hw_mode == MODE_BIT_40)
53 ul_cfg |= UL_CFG_BIT_MODE_40;
54 else if (hw_info->hw_mode == MODE_BIT_36)
55 ul_cfg |= UL_CFG_BIT_MODE_36;
57 iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
58 /* Disable TX and RX invalid address check */
59 iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
60 iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
63 void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
69 reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD :
70 hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD;
71 val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
75 void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info)
77 /* Enable the TX & RX interrupts */
78 iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
79 iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
80 /* Enable the empty queue interrupt */
81 iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
82 iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
85 void t7xx_cldma_hw_reset(void __iomem *ao_base)
89 val = ioread32(ao_base + REG_INFRA_RST2_SET);
90 val |= RST2_PMIC_SW_RST_SET;
91 iowrite32(val, ao_base + REG_INFRA_RST2_SET);
92 val = ioread32(ao_base + REG_INFRA_RST4_SET);
93 val |= RST4_CLDMA1_SW_RST_SET;
94 iowrite32(val, ao_base + REG_INFRA_RST4_SET);
97 val = ioread32(ao_base + REG_INFRA_RST4_CLR);
98 val |= RST4_CLDMA1_SW_RST_CLR;
99 iowrite32(val, ao_base + REG_INFRA_RST4_CLR);
100 val = ioread32(ao_base + REG_INFRA_RST2_CLR);
101 val |= RST2_PMIC_SW_RST_CLR;
102 iowrite32(val, ao_base + REG_INFRA_RST2_CLR);
105 bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
107 u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
109 return ioread64_lo_hi(hw_info->ap_pdn_base + offset);
112 void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
115 u32 offset = qno * ADDR_SIZE;
118 reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
119 hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
120 iowrite64_lo_hi(address, reg + offset);
123 void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
126 void __iomem *base = hw_info->ap_pdn_base;
129 iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
131 iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
134 unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
140 mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
141 reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS :
142 hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS;
148 void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
152 ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
154 /* Clear the ch IDs in the TX interrupt status register */
155 iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
156 ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
159 void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
163 ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
165 /* Clear the ch IDs in the RX interrupt status register */
166 iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
167 ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
170 unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
176 reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 :
177 hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0;
179 return val & bitmask;
182 void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
188 reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
189 hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
190 val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
194 void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
199 reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
200 hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
201 val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
202 iowrite32(val << EQ_STA_BIT_OFFSET, reg);
205 void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
211 reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
212 hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
213 val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
217 void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
222 reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
223 hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
224 val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
225 iowrite32(val << EQ_STA_BIT_OFFSET, reg);
229 * t7xx_cldma_hw_init() - Initialize CLDMA HW.
230 * @hw_info: Pointer to struct t7xx_cldma_hw.
232 * Write uplink and downlink configuration to CLDMA HW.
234 void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info)
238 ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
239 dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
240 /* Configure the DRAM address mode */
241 ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
242 dl_cfg &= ~DL_CFG_BIT_MODE_MASK;
244 if (hw_info->hw_mode == MODE_BIT_64) {
245 ul_cfg |= UL_CFG_BIT_MODE_64;
246 dl_cfg |= DL_CFG_BIT_MODE_64;
247 } else if (hw_info->hw_mode == MODE_BIT_40) {
248 ul_cfg |= UL_CFG_BIT_MODE_40;
249 dl_cfg |= DL_CFG_BIT_MODE_40;
250 } else if (hw_info->hw_mode == MODE_BIT_36) {
251 ul_cfg |= UL_CFG_BIT_MODE_36;
252 dl_cfg |= DL_CFG_BIT_MODE_36;
255 iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
256 dl_cfg |= DL_CFG_UP_HW_LAST;
257 iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
258 iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK);
259 iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK);
260 iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
261 iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
264 void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
268 reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD :
269 hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD;
270 iowrite32(CLDMA_ALL_Q, reg);
273 void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
277 reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
278 hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
279 iowrite32(TXRX_STATUS_BITMASK, reg);
280 iowrite32(EMPTY_STATUS_BITMASK, reg);