// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	0
#define CMDQ_JUMP_ABSOLUTE	1

struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};

int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);
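
/*
 * Usage sketch (illustrative only, not part of this driver): on failure
 * cmdq_mbox_create() returns an ERR_PTR()-encoded error rather than NULL,
 * so callers are expected to test the result with IS_ERR(), e.g.:
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
 *
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 */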

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used
		 * up, the real required size (pkt->cmd_buf_size) is still
		 * increased, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small !\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
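
/*
 * Recovery sketch (an assumption drawn from the comment above, not a
 * pattern mandated by this driver): because cmdq_pkt_append_command()
 * keeps growing pkt->cmd_buf_size past pkt->buf_size on overflow, a
 * caller that saw -ENOMEM while appending can, once all commands have
 * been attempted, read back the real required size and start over:
 *
 *	if (pkt->cmd_buf_size > pkt->buf_size) {
 *		size_t needed = pkt->cmd_buf_size;
 *
 *		cmdq_pkt_destroy(pkt);
 *		pkt = cmdq_pkt_create(client, needed);
 *		// ...re-append all commands into the larger packet...
 *	}
 */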

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);

int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);

int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);

int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);

int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);

int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);

int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);

int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_ABSOLUTE;
	inst.value = addr >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump);

int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);
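
/*
 * End-to-end usage sketch (illustrative only; subsys, offset, value and
 * event are hypothetical client-side values, and error handling is
 * elided for brevity):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_write_mask(pkt, subsys, offset, value, 0xffff);
 *	cmdq_pkt_wfe(pkt, event, true);
 *	cmdq_pkt_finalize(pkt);
 *	cmdq_pkt_flush(pkt);
 *
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */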

MODULE_LICENSE("GPL v2");