// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	1
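/*
 * Each appended command is a single 64-bit GCE instruction. The unions in
 * struct cmdq_instruction below overlay the operand layouts used by the
 * different opcodes: the low 32 bits carry a value/mask or an arg_c/src_reg
 * pair, the next 16 bits an offset/event/register index, then the subsys
 * byte (or sop plus type flag bits), with the opcode in the top byte.
 */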
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
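/*
 * Illustrative only: "mediatek,gce-client-reg" is parsed with three fixed
 * args (subsys, offset, size), so a client node is expected to look roughly
 * like this; the node name, addresses and sizes below are made-up examples:
 *
 *	mmsys: syscon@14000000 {
 *		compatible = "syscon";
 *		reg = <0 0x14000000 0 0x1000>;
 *		mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
 *	};
 */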
static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);
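/*
 * Since cmdq_mbox_create() returns an ERR_PTR() on failure, callers must
 * check the result with IS_ERR(); the index and timeout below are example
 * values only:
 *
 *	client = cmdq_mbox_create(dev, 0, 2000);
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 */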
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	dma_addr_t dma_addr;
	struct device *dev;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);
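/*
 * The packet buffer stays mapped for the packet's whole lifetime: it is
 * mapped once in cmdq_pkt_create(), synced to the device before each flush
 * and back to the CPU when the flush completes, and only unmapped here in
 * cmdq_pkt_destroy().
 */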
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) still keeps
		 * growing, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
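/*
 * A masked write is emitted as an instruction pair: a CMDQ_CODE_MASK
 * instruction carrying ~mask, followed by the write itself with
 * CMDQ_WRITE_ENABLE_MASK set in its offset. For example, updating only
 * bits [15:8] of a register (register offset and values are illustrative):
 *
 *	cmdq_pkt_write_mask(pkt, subsys, 0x30, 0x1200, 0xff00);
 */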
int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);
int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);
int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);
int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);
int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
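/*
 * The three event helpers share the WFE opcode: cmdq_pkt_wfe() above stalls
 * the GCE thread until the event fires (optionally clearing it afterwards),
 * while cmdq_pkt_clear_event() and cmdq_pkt_set_event() below only update
 * the event state without waiting. Event numbers come from the per-SoC
 * dt-binding headers and must be below CMDQ_MAX_EVENT.
 */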
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);
int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_RELATIVE;
	inst.value = addr >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump);
int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);
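/*
 * Finalizing appends the standard packet epilogue: an end-of-command
 * instruction that raises the completion interrupt, followed by a jump of
 * CMDQ_JUMP_PASS (scaled by the controller's PA shift) that simply moves
 * execution past the end of the packet. Every packet must be finalized
 * exactly once before it is flushed.
 */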
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);
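/*
 * End-to-end usage sketch (illustrative only, error handling trimmed; the
 * mailbox index, timeout, buffer size and register values are example
 * values, not taken from this file):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, 2000);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_write(pkt, client_reg.subsys, client_reg.offset, 0x1);
 *	cmdq_pkt_finalize(pkt);
 *	cmdq_pkt_flush(pkt);
 *
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */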
MODULE_LICENSE("GPL v2");