// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	1

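/*
 * Each GCE command is a single 64-bit instruction: roughly, the low 32 bits
 * carry an immediate value, a mask or a source operand, and the high 32 bits
 * carry the register offset/event, the subsys (or internal-register selector
 * bits) and the 8-bit opcode. struct cmdq_instruction below mirrors that
 * layout so a command can be built field by field and copied into the packet
 * buffer.
 */
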
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};

int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return (struct cmdq_client *)-ENOMEM;

	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) still keeps
		 * growing, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small !\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

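/*
 * Illustrative recovery sketch (not part of this file): if appending fails
 * with -ENOMEM, pkt->cmd_buf_size has still been advanced past pkt->buf_size,
 * so a client could do roughly the following (variable names are
 * hypothetical):
 *
 *	size_t needed = pkt->cmd_buf_size;
 *
 *	cmdq_pkt_destroy(pkt);
 *	pkt = cmdq_pkt_create(client, needed);
 *
 * and then rebuild the command sequence in the larger packet.
 */
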
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

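/*
 * The *_s helpers below address registers by a full bus address instead of
 * the subsys/offset pair: the high bits of the address are expected to live
 * in an internal GCE register (selected by high_addr_reg_idx, typically
 * loaded beforehand with cmdq_pkt_assign()), while addr_low carries the low
 * bits. This is the usual pattern for these helpers, though client drivers
 * may set the register up differently.
 */
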
int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);

int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);

int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);

int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);

int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);

int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);

int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);

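/*
 * Illustrative pairing of cmdq_pkt_assign() with the *_s helpers above
 * (client-side sketch only; pa and val are placeholders, and the
 * CMDQ_ADDR_HIGH()/CMDQ_ADDR_LOW() helpers and CMDQ_THR_SPR_IDX0 index are
 * assumed from the cmdq headers):
 *
 *	cmdq_pkt_assign(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_HIGH(pa));
 *	cmdq_pkt_write_s_value(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_LOW(pa), val);
 */
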
int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_RELATIVE;
	inst.value = addr >>
		     cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump);

int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		     cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
{
	int err;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send the next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

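/*
 * Typical client flow, as an illustrative sketch only (error handling trimmed;
 * local names such as cl, pkt, subsys, reg_offset and event_id are
 * placeholders):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_write_mask(pkt, subsys, reg_offset, val, mask);
 *	cmdq_pkt_wfe(pkt, event_id, true);
 *	cmdq_pkt_finalize(pkt);
 *	cmdq_pkt_flush_async(pkt);
 *
 * Completion is reported back asynchronously through the mailbox client, so
 * cmdq_pkt_destroy() and cmdq_mbox_destroy() should only be called once the
 * hardware is done with the packet.
 */
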
MODULE_LICENSE("GPL v2");