1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright 2020-2021 NXP */
6 #include <linux/init.h>
7 #include <linux/interconnect.h>
8 #include <linux/ioctl.h>
9 #include <linux/list.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/of_device.h>
13 #include <linux/of_address.h>
14 #include <linux/platform_device.h>
15 #include <linux/slab.h>
16 #include <linux/types.h>
17 #include <linux/delay.h>
18 #include <linux/vmalloc.h>
/*
 * Maps a synchronous command id to the firmware response id that completes
 * it (see the vpu_cmd_requests[] table below).
 * NOTE(review): the member declarations of this struct are elided from this
 * view; the table initializers use .request, .response and a .handled field.
 */
25 struct vpu_cmd_request {
/*
 * NOTE(review): the lines between here and the fields below are elided;
 * the fields that follow belong to the queued-command record (vpu_cmd_t):
 * list entry on inst->cmd_q, the matching sync-request descriptor (NULL for
 * fire-and-forget commands), and the packed RPC packet to send.
 */
32 struct list_head list;
34 struct vpu_cmd_request *request;
35 struct vpu_rpc_event *pkt;
/*
 * Table of commands that must be acknowledged by the firmware before the
 * next command is issued: each entry pairs a command id with the message id
 * that completes it. Commands not listed here complete immediately.
 */
39 static struct vpu_cmd_request vpu_cmd_requests[] = {
41 .request = VPU_CMD_ID_CONFIGURE_CODEC,
42 .response = VPU_MSG_ID_MEM_REQUEST,
46 .request = VPU_CMD_ID_START,
47 .response = VPU_MSG_ID_START_DONE,
51 .request = VPU_CMD_ID_STOP,
52 .response = VPU_MSG_ID_STOP_DONE,
56 .request = VPU_CMD_ID_ABORT,
57 .response = VPU_MSG_ID_ABORT_DONE,
61 .request = VPU_CMD_ID_RST_BUF,
62 .response = VPU_MSG_ID_BUF_RST,
/*
 * Hand a packed command packet to the firmware: copy it into the command
 * buffer through the iface layer, then ring the COMMAND mailbox so the
 * firmware picks it up. Returns 0 on success, negative on failure
 * (error-handling lines are elided from this view).
 */
67 static int vpu_cmd_send(struct vpu_core *core, struct vpu_rpc_event *pkt)
71 ret = vpu_iface_send_cmd(core, pkt);
75 /* write the cmd data to the cmd buffer before triggering a cmd interrupt */
77 vpu_mbox_send_type(core, COMMAND);
/*
 * Allocate and initialize a queued-command record for command @id with
 * optional payload @data. The command and its RPC packet are vzalloc'ed
 * separately; the packet is packed via the iface layer, and if @id is one
 * of the synchronous commands in vpu_cmd_requests[] the matching request
 * descriptor is attached so the caller knows to wait for the response.
 * Returns NULL on allocation/pack failure (those paths are elided here).
 */
82 static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data)
84 struct vpu_cmd_t *cmd;
88 cmd = vzalloc(sizeof(*cmd));
92 cmd->pkt = vzalloc(sizeof(*cmd->pkt));
/* Serialize @id/@data into the RPC packet for this instance. */
99 ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
101 dev_err(inst->dev, "iface pack cmd(%d) fail\n", id);
/* Attach the response descriptor if this command expects an ack. */
106 for (i = 0; i < ARRAY_SIZE(vpu_cmd_requests); i++) {
107 if (vpu_cmd_requests[i].request == id) {
108 cmd->request = &vpu_cmd_requests[i];
/*
 * Release a command record and its packet (body elided from this view;
 * presumably frees cmd->pkt and cmd allocated in vpu_alloc_cmd()).
 */
116 static void vpu_free_cmd(struct vpu_cmd_t *cmd)
/*
 * Send a single queued command for @inst to the firmware, bracketed by the
 * iface pre/post hooks, and record it in the instance flow trace. Logs and
 * returns an error if the mailbox send fails.
 */
125 static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
129 dev_dbg(inst->dev, "[%d]send cmd(0x%x)\n", inst->id, cmd->id);
130 vpu_iface_pre_send_cmd(inst);
131 ret = vpu_cmd_send(inst->core, cmd->pkt);
133 vpu_iface_post_send_cmd(inst);
134 vpu_inst_record_flow(inst, cmd->id);
136 dev_err(inst->dev, "[%d] iface send cmd(0x%x) fail\n", inst->id, cmd->id);
/*
 * Drain the instance command queue. Does nothing while a previous
 * synchronous command is still pending an ack. Each dequeued command is
 * sent to the firmware; a command that expects a response is parked in
 * inst->pending, which stalls further processing until vpu_clear_pending()
 * runs (the branch that distinguishes sync commands is elided here —
 * presumably only cmd->request != NULL sets pending and breaks the loop).
 * Caller must hold core->cmd_lock.
 */
142 static void vpu_process_cmd_request(struct vpu_inst *inst)
144 struct vpu_cmd_t *cmd;
145 struct vpu_cmd_t *tmp;
147 if (!inst || inst->pending)
150 list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
151 list_del_init(&cmd->list);
152 if (vpu_session_process_cmd(inst, cmd))
153 dev_err(inst->dev, "[%d] process cmd(%d) fail\n", inst->id, cmd->id);
155 inst->pending = (void *)cmd;
/*
 * Build command @id with payload @data and queue it on @inst. Under
 * core->cmd_lock the command is stamped with a monotonically increasing
 * sequence key (returned via @key, used later to match the ack), @sync is
 * set to whether the command expects a firmware response, and the queue is
 * kicked. Returns 0 on success (error paths are elided from this view).
 */
162 static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
163 unsigned long *key, int *sync)
165 struct vpu_core *core;
166 struct vpu_cmd_t *cmd;
168 if (!inst || !inst->core)
172 cmd = vpu_alloc_cmd(inst, id, data);
176 mutex_lock(&core->cmd_lock);
/* Per-core sequence number identifies this command for the ack waiter. */
177 cmd->key = core->cmd_seq++;
181 *sync = cmd->request ? true : false;
182 list_add_tail(&cmd->list, &inst->cmd_q);
183 vpu_process_cmd_request(inst);
184 mutex_unlock(&core->cmd_lock);
/*
 * Retire the instance's in-flight synchronous command: free it, wake any
 * thread sleeping on the core ack waitqueue, and clear the pending slot so
 * vpu_process_cmd_request() can resume. Caller must hold core->cmd_lock.
 */
189 static void vpu_clear_pending(struct vpu_inst *inst)
191 if (!inst || !inst->pending)
194 vpu_free_cmd(inst->pending);
195 wake_up_all(&inst->core->ack_wq);
196 inst->pending = NULL;
/*
 * Return whether firmware message @response (with the given @handled state)
 * acknowledges the pending command @cmd: the command must be synchronous
 * and its expected response id and handled flag must both match (the
 * individual return statements are elided from this view).
 */
199 static bool vpu_check_response(struct vpu_cmd_t *cmd, u32 response, u32 handled)
201 struct vpu_cmd_request *request;
203 if (!cmd || !cmd->request)
206 request = cmd->request;
207 if (request->response != response)
209 if (request->handled != handled)
/*
 * Called when a firmware message arrives for @inst: if it acknowledges the
 * pending synchronous command, retire that command and then push any
 * commands that were queued behind it. All under core->cmd_lock.
 */
215 int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled)
217 struct vpu_core *core;
219 if (!inst || !inst->core)
223 mutex_lock(&core->cmd_lock);
224 if (vpu_check_response(inst->pending, response, handled))
225 vpu_clear_pending(inst);
/* Resume the queue now that the pending slot may be free again. */
227 vpu_process_cmd_request(inst);
228 mutex_unlock(&core->cmd_lock);
/*
 * Discard all outstanding commands for @inst: the pending synchronous
 * command plus everything still on the queue (the per-entry free inside
 * the loop is elided from this view — presumably vpu_free_cmd()).
 */
233 void vpu_clear_request(struct vpu_inst *inst)
235 struct vpu_cmd_t *cmd;
236 struct vpu_cmd_t *tmp;
238 mutex_lock(&inst->core->cmd_lock);
240 vpu_clear_pending(inst);
242 list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
243 list_del_init(&cmd->list);
246 mutex_unlock(&inst->core->cmd_lock);
/*
 * Wait predicate for sync_session_response(): a command identified by @key
 * has been acknowledged once it is neither the pending command nor still
 * sitting on the queue. Checks both under core->cmd_lock (the return
 * statements inside the matches are elided from this view).
 */
249 static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
251 struct vpu_core *core = inst->core;
252 struct vpu_cmd_t *cmd;
255 mutex_lock(&core->cmd_lock);
/* Still in flight as the pending synchronous command? */
257 if (cmd && key == cmd->key) {
/* Still waiting its turn on the queue? */
261 list_for_each_entry(cmd, &inst->cmd_q, list) {
262 if (key == cmd->key) {
268 mutex_unlock(&core->cmd_lock);
/*
 * Block until the command identified by @key has been acknowledged by the
 * firmware, or VPU_TIMEOUT elapses. The wait is bracketed by the
 * instance's wait_prepare/wait_finish ops. On timeout the instance is
 * flagged in the core's hang mask and its pending command is dropped so
 * the queue is not wedged forever. Return value lines are elided.
 */
273 static int sync_session_response(struct vpu_inst *inst, unsigned long key)
275 struct vpu_core *core;
277 if (!inst || !inst->core)
282 call_void_vop(inst, wait_prepare);
283 wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), VPU_TIMEOUT);
284 call_void_vop(inst, wait_finish);
/* Re-check after the wait: distinguishes timeout from genuine ack. */
286 if (!check_is_responsed(inst, key)) {
287 dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
288 set_bit(inst->id, &core->hang_mask);
289 mutex_lock(&inst->core->cmd_lock);
290 vpu_clear_pending(inst);
291 mutex_unlock(&inst->core->cmd_lock);
/*
 * Common entry point for all session commands: queue command @id with
 * payload @data and, when the command is synchronous (listed in
 * vpu_cmd_requests[]), wait for its firmware acknowledgement. The branch
 * testing @sync before the wait is elided from this view.
 */
298 static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
307 ret = vpu_request_cmd(inst, id, data, &key, &sync);
309 ret = sync_session_response(inst, key);
312 dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);
/* Ask the firmware to configure the codec; synchronous (MEM_REQUEST ack). */
317 int vpu_session_configure_codec(struct vpu_inst *inst)
319 return vpu_session_send_cmd(inst, VPU_CMD_ID_CONFIGURE_CODEC, NULL);
/* Start the session; synchronous (waits for START_DONE). */
322 int vpu_session_start(struct vpu_inst *inst)
324 vpu_trace(inst->dev, "[%d]\n", inst->id);
326 return vpu_session_send_cmd(inst, VPU_CMD_ID_START, NULL);
/* Stop the session; synchronous (waits for STOP_DONE), then pauses briefly. */
329 int vpu_session_stop(struct vpu_inst *inst)
333 vpu_trace(inst->dev, "[%d]\n", inst->id);
335 ret = vpu_session_send_cmd(inst, VPU_CMD_ID_STOP, NULL);
336 /* workaround for a firmware bug,
337 * if the next command is too close after stop cmd,
338 * the firmware may enter wfi wrongly.
340 usleep_range(3000, 5000);
/*
 * Queue one frame for encoding, passing the frame timestamp by address as
 * the command payload. Fix: the payload argument was mojibake ("×tamp",
 * an HTML "&times;" mis-decoding of "&timestamp") and did not compile;
 * restore the address-of expression.
 */
344 int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp)
346 return vpu_session_send_cmd(inst, VPU_CMD_ID_FRAME_ENCODE, &timestamp);
/* Register an allocated frame store described by @fs with the firmware. */
349 int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
351 return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_ALLOC, fs);
/* Tell the firmware to release the frame store described by @fs. */
354 int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
356 return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_RELEASE, fs);
/* Abort the session; synchronous (waits for ABORT_DONE). */
359 int vpu_session_abort(struct vpu_inst *inst)
361 return vpu_session_send_cmd(inst, VPU_CMD_ID_ABORT, NULL);
/* Reset the session buffers; synchronous (waits for BUF_RST). */
364 int vpu_session_rst_buf(struct vpu_inst *inst)
366 return vpu_session_send_cmd(inst, VPU_CMD_ID_RST_BUF, NULL);
/* Send timestamp information @info to the firmware for this session. */
369 int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info)
371 return vpu_session_send_cmd(inst, VPU_CMD_ID_TIMESTAMP, info);
/*
 * Repack the session parameters in @arg through the decode- or
 * encode-specific iface setter (selected by the instance type), then push
 * the update command to the firmware.
 */
374 int vpu_session_update_parameters(struct vpu_inst *inst, void *arg)
376 if (inst->type & VPU_CORE_TYPE_DEC)
377 vpu_iface_set_decode_params(inst, arg, 1);
379 vpu_iface_set_encode_params(inst, arg, 1);
381 return vpu_session_send_cmd(inst, VPU_CMD_ID_UPDATE_PARAMETER, arg);
/* Ask the firmware to emit debug information for this session. */
384 int vpu_session_debug(struct vpu_inst *inst)
386 return vpu_session_send_cmd(inst, VPU_CMD_ID_DEBUG, NULL);
/*
 * Trigger a firmware state snapshot via the core's first instance (a no-op
 * success if the core has no instances), then wait up to VPU_TIMEOUT for
 * the firmware to signal completion on core->cmp.
 */
389 int vpu_core_snapshot(struct vpu_core *core)
391 struct vpu_inst *inst;
394 if (!core || list_empty(&core->instances))
397 inst = list_first_entry(&core->instances, struct vpu_inst, list);
/* Arm the completion before sending so the ack cannot be missed. */
399 reinit_completion(&core->cmp);
400 ret = vpu_session_send_cmd(inst, VPU_CMD_ID_SNAPSHOT, NULL);
403 ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
405 dev_err(core->dev, "snapshot timeout\n");
/*
 * Software-reset the firmware: pack a FIRM_RESET command (instance id 0,
 * no payload) on the stack, send it under cmd_lock, then wait up to
 * VPU_TIMEOUT for the firmware to complete core->cmp.
 */
412 int vpu_core_sw_reset(struct vpu_core *core)
414 struct vpu_rpc_event pkt;
417 memset(&pkt, 0, sizeof(pkt));
418 vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_FIRM_RESET, NULL);
/* Arm the completion before sending so the ack cannot be missed. */
420 reinit_completion(&core->cmp);
421 mutex_lock(&core->cmd_lock);
422 ret = vpu_cmd_send(core, &pkt);
423 mutex_unlock(&core->cmd_lock);
426 ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
428 dev_err(core->dev, "sw reset timeout\n");