GNU Linux-libre 6.8.7-gnu
drivers/media/platform/amphion/vpu_core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_core.h"
#include "vpu_mbox.h"
#include "vpu_msgs.h"
#include "vpu_rpc.h"
#include "vpu_cmds.h"

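/* MMIO accessors for the VPU control/status (CSR) registers. */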
void csr_writel(struct vpu_core *core, u32 reg, u32 val)
{
        writel(val, core->base + reg);
}

u32 csr_readl(struct vpu_core *core, u32 reg)
{
        return readl(core->base + reg);
}

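/*
 * Copy the firmware image into the preallocated boot region and let the
 * interface layer post-process it. reject_firmware() is Linux-libre's
 * replacement for request_firmware(); it refuses to load non-free blobs.
 */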
static int vpu_core_load_firmware(struct vpu_core *core)
{
        const struct firmware *pfw = NULL;
        int ret = 0;

        if (!core->fw.virt) {
                dev_err(core->dev, "firmware buffer is not ready\n");
                return -EINVAL;
        }

        ret = reject_firmware(&pfw, core->res->fwname, core->dev);
        dev_dbg(core->dev, "request_firmware %s : %d\n", core->res->fwname, ret);
        if (ret) {
                dev_err(core->dev, "request firmware %s failed, ret = %d\n",
                        core->res->fwname, ret);
                return ret;
        }

        if (core->fw.length < pfw->size) {
                dev_err(core->dev, "firmware size %zu exceeds buffer size %u\n",
                        pfw->size, core->fw.length);
                ret = -EINVAL;
                goto exit;
        }

        memset(core->fw.virt, 0, core->fw.length);
        memcpy(core->fw.virt, pfw->data, pfw->size);
        core->fw.bytesused = pfw->size;
        ret = vpu_iface_on_firmware_loaded(core);
exit:
        release_firmware(pfw);
        pfw = NULL;

        return ret;
}

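/*
 * Called once the firmware reports ready: log the firmware version,
 * derive the supported instance count (capped by the per-instance "act"
 * buffer slices and the width of the instance bitmask), and mark the
 * core active.
 */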
static int vpu_core_boot_done(struct vpu_core *core)
{
        u32 fw_version;

        fw_version = vpu_iface_get_version(core);
        dev_info(core->dev, "%s firmware version : %d.%d.%d\n",
                 vpu_core_type_desc(core->type),
                 (fw_version >> 16) & 0xff,
                 (fw_version >> 8) & 0xff,
                 fw_version & 0xff);
        core->supported_instance_count = vpu_iface_get_max_instance_count(core);
        if (core->res->act_size) {
                u32 count = core->act.length / core->res->act_size;

                core->supported_instance_count = min(core->supported_instance_count, count);
        }
        if (core->supported_instance_count >= BITS_PER_TYPE(core->instance_mask))
                core->supported_instance_count = BITS_PER_TYPE(core->instance_mask);
        core->fw_version = fw_version;
        vpu_core_set_state(core, VPU_CORE_ACTIVE);

        return 0;
}

static int vpu_core_wait_boot_done(struct vpu_core *core)
{
        int ret;

        ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
        if (!ret) {
                dev_err(core->dev, "boot timeout\n");
                return -EINVAL;
        }
        return vpu_core_boot_done(core);
}

static int vpu_core_boot(struct vpu_core *core, bool load)
{
        int ret;

        reinit_completion(&core->cmp);
        if (load) {
                ret = vpu_core_load_firmware(core);
                if (ret)
                        return ret;
        }

        vpu_iface_boot_core(core);
        return vpu_core_wait_boot_done(core);
}

static int vpu_core_shutdown(struct vpu_core *core)
{
        return vpu_iface_shutdown_core(core);
}

static int vpu_core_restore(struct vpu_core *core)
{
        int ret;

        ret = vpu_core_sw_reset(core);
        if (ret)
                return ret;

        vpu_core_boot_done(core);
        return vpu_iface_restore_core(core);
}

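/*
 * Coherent DMA buffer helpers; allocations are restricted to 32-bit
 * addressable memory via GFP_DMA32.
 */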
static int __vpu_alloc_dma(struct device *dev, struct vpu_buffer *buf)
{
        gfp_t gfp = GFP_KERNEL | GFP_DMA32;

        if (!buf->length)
                return 0;

        buf->virt = dma_alloc_coherent(dev, buf->length, &buf->phys, gfp);
        if (!buf->virt)
                return -ENOMEM;

        buf->dev = dev;

        return 0;
}

void vpu_free_dma(struct vpu_buffer *buf)
{
        if (!buf->virt || !buf->dev)
                return;

        dma_free_coherent(buf->dev, buf->length, buf->virt, buf->phys);
        buf->virt = NULL;
        buf->phys = 0;
        buf->length = 0;
        buf->bytesused = 0;
        buf->dev = NULL;
}

int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf)
{
        return __vpu_alloc_dma(core->dev, buf);
}

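/*
 * Core state tracking. A transition to VPU_CORE_DEINIT clears the hang
 * mask; a powered-off core with outstanding requests is flagged as hung.
 */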
void vpu_core_set_state(struct vpu_core *core, enum vpu_core_state state)
{
        if (state != core->state)
                vpu_trace(core->dev, "vpu core state change from %d to %d\n", core->state, state);
        core->state = state;
        if (core->state == VPU_CORE_DEINIT)
                core->hang_mask = 0;
}

static void vpu_core_update_state(struct vpu_core *core)
{
        if (!vpu_iface_get_power_state(core)) {
                if (core->request_count)
                        vpu_core_set_state(core, VPU_CORE_HANG);
                else
                        vpu_core_set_state(core, VPU_CORE_DEINIT);

        } else if (core->state == VPU_CORE_ACTIVE && core->hang_mask) {
                vpu_core_set_state(core, VPU_CORE_HANG);
        }
}

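/*
 * Pick a core of the requested type: a core still in VPU_CORE_DEINIT is
 * taken immediately (it can be booted on demand), otherwise the
 * least-loaded active core wins.
 */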
static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 type)
{
        struct vpu_core *core = NULL;
        int request_count = INT_MAX;
        struct vpu_core *c;

        list_for_each_entry(c, &vpu->cores, list) {
                dev_dbg(c->dev, "instance_mask = 0x%lx, state = %d\n", c->instance_mask, c->state);
                if (c->type != type)
                        continue;
                mutex_lock(&c->lock);
                vpu_core_update_state(c);
                mutex_unlock(&c->lock);
                if (c->state == VPU_CORE_DEINIT) {
                        core = c;
                        break;
                }
                if (c->state != VPU_CORE_ACTIVE)
                        continue;
                if (c->request_count < request_count) {
                        request_count = c->request_count;
                        core = c;
                }
                if (!request_count)
                        break;
        }

        return core;
}

static bool vpu_core_is_exist(struct vpu_dev *vpu, struct vpu_core *core)
{
        struct vpu_core *c;

        list_for_each_entry(c, &vpu->cores, list) {
                if (c == core)
                        return true;
        }

        return false;
}

static void vpu_core_get_vpu(struct vpu_core *core)
{
        core->vpu->get_vpu(core->vpu);
        if (core->type == VPU_CORE_TYPE_ENC)
                core->vpu->get_enc(core->vpu);
        if (core->type == VPU_CORE_TYPE_DEC)
                core->vpu->get_dec(core->vpu);
}

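/*
 * Register a core with its parent VPU device: set up the message
 * workqueue and kfifo, then add the core to the device's core list.
 */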
static int vpu_core_register(struct device *dev, struct vpu_core *core)
{
        struct vpu_dev *vpu = dev_get_drvdata(dev);
        int ret = 0;

        dev_dbg(core->dev, "register core %s\n", vpu_core_type_desc(core->type));
        if (vpu_core_is_exist(vpu, core))
                return 0;

        core->workqueue = alloc_ordered_workqueue("vpu", WQ_MEM_RECLAIM);
        if (!core->workqueue) {
                dev_err(core->dev, "failed to alloc workqueue\n");
                return -ENOMEM;
        }
        INIT_WORK(&core->msg_work, vpu_msg_run_work);
        INIT_DELAYED_WORK(&core->msg_delayed_work, vpu_msg_delayed_work);
        core->msg_buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
        core->msg_buffer = vzalloc(core->msg_buffer_size);
        if (!core->msg_buffer) {
                dev_err(core->dev, "failed to allocate buffer for fifo\n");
                ret = -ENOMEM;
                goto error;
        }
        ret = kfifo_init(&core->msg_fifo, core->msg_buffer, core->msg_buffer_size);
        if (ret) {
                dev_err(core->dev, "failed to init kfifo\n");
                goto error;
        }

        list_add_tail(&core->list, &vpu->cores);
        vpu_core_get_vpu(core);

        return 0;
error:
        if (core->msg_buffer) {
                vfree(core->msg_buffer);
                core->msg_buffer = NULL;
        }
        if (core->workqueue) {
                destroy_workqueue(core->workqueue);
                core->workqueue = NULL;
        }
        return ret;
}

static void vpu_core_put_vpu(struct vpu_core *core)
{
        if (core->type == VPU_CORE_TYPE_ENC)
                core->vpu->put_enc(core->vpu);
        if (core->type == VPU_CORE_TYPE_DEC)
                core->vpu->put_dec(core->vpu);
        core->vpu->put_vpu(core->vpu);
}

static int vpu_core_unregister(struct device *dev, struct vpu_core *core)
{
        list_del_init(&core->list);

        vpu_core_put_vpu(core);
        core->vpu = NULL;
        vfree(core->msg_buffer);
        core->msg_buffer = NULL;

        if (core->workqueue) {
                cancel_work_sync(&core->msg_work);
                cancel_delayed_work_sync(&core->msg_delayed_work);
                destroy_workqueue(core->workqueue);
                core->workqueue = NULL;
        }

        return 0;
}

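/*
 * Instance ids are allocated from the core's instance bitmask; ffz()
 * returns the lowest clear bit, i.e. the smallest free id.
 */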
static int vpu_core_acquire_instance(struct vpu_core *core)
{
        int id;

        id = ffz(core->instance_mask);
        if (id >= core->supported_instance_count)
                return -EINVAL;

        set_bit(id, &core->instance_mask);

        return id;
}

static void vpu_core_release_instance(struct vpu_core *core, int id)
{
        if (id < 0 || id >= core->supported_instance_count)
                return;

        clear_bit(id, &core->instance_mask);
}

struct vpu_inst *vpu_inst_get(struct vpu_inst *inst)
{
        if (!inst)
                return NULL;

        atomic_inc(&inst->ref_count);

        return inst;
}

void vpu_inst_put(struct vpu_inst *inst)
{
        if (!inst)
                return;
        if (atomic_dec_and_test(&inst->ref_count)) {
                if (inst->release)
                        inst->release(inst);
        }
}

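/*
 * Find a suitable core for the given type and take a reference on it,
 * booting (or restoring) the core first if it is still deinitialized.
 */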
struct vpu_core *vpu_request_core(struct vpu_dev *vpu, enum vpu_core_type type)
{
        struct vpu_core *core = NULL;
        int ret;

        mutex_lock(&vpu->lock);

        core = vpu_core_find_proper_by_type(vpu, type);
        if (!core)
                goto exit;

        mutex_lock(&core->lock);
        pm_runtime_resume_and_get(core->dev);

        if (core->state == VPU_CORE_DEINIT) {
                if (vpu_iface_get_power_state(core))
                        ret = vpu_core_restore(core);
                else
                        ret = vpu_core_boot(core, true);
                if (ret) {
                        pm_runtime_put_sync(core->dev);
                        mutex_unlock(&core->lock);
                        core = NULL;
                        goto exit;
                }
        }

        core->request_count++;

        mutex_unlock(&core->lock);
exit:
        mutex_unlock(&vpu->lock);

        return core;
}

void vpu_release_core(struct vpu_core *core)
{
        if (!core)
                return;

        mutex_lock(&core->lock);
        pm_runtime_put_sync(core->dev);
        if (core->request_count)
                core->request_count--;
        mutex_unlock(&core->lock);
}

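/*
 * Bind an instance to a core (requesting one if needed), allocate an
 * instance id, and carve the instance's slice out of the "act" buffer.
 */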
int vpu_inst_register(struct vpu_inst *inst)
{
        struct vpu_dev *vpu;
        struct vpu_core *core;
        int ret = 0;

        vpu = inst->vpu;
        core = inst->core;
        if (!core) {
                core = vpu_request_core(vpu, inst->type);
                if (!core) {
                        dev_err(vpu->dev, "there is no vpu core for %s\n",
                                vpu_core_type_desc(inst->type));
                        return -EINVAL;
                }
                inst->core = core;
                inst->dev = get_device(core->dev);
        }

        mutex_lock(&core->lock);
        if (core->state != VPU_CORE_ACTIVE) {
                dev_err(core->dev, "vpu core is not active, state = %d\n", core->state);
                ret = -EINVAL;
                goto exit;
        }

        if (inst->id >= 0 && inst->id < core->supported_instance_count)
                goto exit;

        ret = vpu_core_acquire_instance(core);
        if (ret < 0)
                goto exit;

        vpu_trace(inst->dev, "[%d] %p\n", ret, inst);
        inst->id = ret;
        list_add_tail(&inst->list, &core->instances);
        ret = 0;
        if (core->res->act_size) {
                inst->act.phys = core->act.phys + core->res->act_size * inst->id;
                inst->act.virt = core->act.virt + core->res->act_size * inst->id;
                inst->act.length = core->res->act_size;
        }
        vpu_inst_create_dbgfs_file(inst);
exit:
        mutex_unlock(&core->lock);

        if (ret)
                dev_err(core->dev, "failed to register instance\n");
        return ret;
}

int vpu_inst_unregister(struct vpu_inst *inst)
{
        struct vpu_core *core;

        if (!inst->core)
                return 0;

        core = inst->core;
        vpu_clear_request(inst);
        mutex_lock(&core->lock);
        if (inst->id >= 0 && inst->id < core->supported_instance_count) {
                vpu_inst_remove_dbgfs_file(inst);
                list_del_init(&inst->list);
                vpu_core_release_instance(core, inst->id);
                inst->id = VPU_INST_NULL_ID;
        }
        vpu_core_update_state(core);
        if (core->state == VPU_CORE_HANG && !core->instance_mask) {
                int err;

                dev_info(core->dev, "reset hung core\n");
                mutex_unlock(&core->lock);
                err = vpu_core_sw_reset(core);
                mutex_lock(&core->lock);
                if (!err) {
                        vpu_core_set_state(core, VPU_CORE_ACTIVE);
                        core->hang_mask = 0;
                }
        }
        mutex_unlock(&core->lock);

        return 0;
}

struct vpu_inst *vpu_core_find_instance(struct vpu_core *core, u32 index)
{
        struct vpu_inst *inst = NULL;
        struct vpu_inst *tmp;

        mutex_lock(&core->lock);
        if (index >= core->supported_instance_count || !test_bit(index, &core->instance_mask))
                goto exit;
        list_for_each_entry(tmp, &core->instances, list) {
                if (tmp->id == index) {
                        inst = vpu_inst_get(tmp);
                        break;
                }
        }
exit:
        mutex_unlock(&core->lock);

        return inst;
}

const struct vpu_core_resources *vpu_get_resource(struct vpu_inst *inst)
{
        struct vpu_dev *vpu;
        struct vpu_core *core = NULL;
        const struct vpu_core_resources *res = NULL;

        if (!inst || !inst->vpu)
                return NULL;

        if (inst->core && inst->core->res)
                return inst->core->res;

        vpu = inst->vpu;
        mutex_lock(&vpu->lock);
        list_for_each_entry(core, &vpu->cores, list) {
                if (core->type == inst->type) {
                        res = core->res;
                        break;
                }
        }
        mutex_unlock(&vpu->lock);

        return res;
}

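/*
 * Parse the two required memory-regions from the device tree: region 0
 * holds the firmware boot image, region 1 is split into rpc, fwlog and
 * "act" sub-buffers according to the per-SoC resource sizes.
 */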
static int vpu_core_parse_dt(struct vpu_core *core, struct device_node *np)
{
        struct device_node *node;
        struct resource res;
        int ret;

        if (of_count_phandle_with_args(np, "memory-region", NULL) < 2) {
                dev_err(core->dev, "need 2 memory-regions for boot and rpc\n");
                return -ENODEV;
        }

        node = of_parse_phandle(np, "memory-region", 0);
        if (!node) {
                dev_err(core->dev, "boot-region of_parse_phandle error\n");
                return -ENODEV;
        }
        if (of_address_to_resource(node, 0, &res)) {
                dev_err(core->dev, "boot-region of_address_to_resource error\n");
                of_node_put(node);
                return -EINVAL;
        }
        core->fw.phys = res.start;
        core->fw.length = resource_size(&res);

        of_node_put(node);

        node = of_parse_phandle(np, "memory-region", 1);
        if (!node) {
                dev_err(core->dev, "rpc-region of_parse_phandle error\n");
                return -ENODEV;
        }
        if (of_address_to_resource(node, 0, &res)) {
                dev_err(core->dev, "rpc-region of_address_to_resource error\n");
                of_node_put(node);
                return -EINVAL;
        }
        core->rpc.phys = res.start;
        core->rpc.length = resource_size(&res);

        if (core->rpc.length < core->res->rpc_size + core->res->fwlog_size) {
                dev_err(core->dev, "the rpc-region <%pad, 0x%x> is too small\n",
                        &core->rpc.phys, core->rpc.length);
                of_node_put(node);
                return -EINVAL;
        }

        core->fw.virt = memremap(core->fw.phys, core->fw.length, MEMREMAP_WC);
        if (!core->fw.virt) {
                dev_err(core->dev, "failed to remap boot-region\n");
                of_node_put(node);
                return -ENOMEM;
        }
        core->rpc.virt = memremap(core->rpc.phys, core->rpc.length, MEMREMAP_WC);
        if (!core->rpc.virt) {
                dev_err(core->dev, "failed to remap rpc-region\n");
                memunmap(core->fw.virt);
                core->fw.virt = NULL;
                of_node_put(node);
                return -ENOMEM;
        }
        memset(core->rpc.virt, 0, core->rpc.length);

        ret = vpu_iface_check_memory_region(core, core->rpc.phys, core->rpc.length);
        if (ret != VPU_CORE_MEMORY_UNCACHED) {
                dev_err(core->dev, "rpc region <%pad, 0x%x> isn't uncached\n",
                        &core->rpc.phys, core->rpc.length);
                of_node_put(node);
                return -EINVAL;
        }

        core->log.phys = core->rpc.phys + core->res->rpc_size;
        core->log.virt = core->rpc.virt + core->res->rpc_size;
        core->log.length = core->res->fwlog_size;
        core->act.phys = core->log.phys + core->log.length;
        core->act.virt = core->log.virt + core->log.length;
        core->act.length = core->rpc.length - core->res->rpc_size - core->log.length;
        core->rpc.length = core->res->rpc_size;

        of_node_put(node);

        return 0;
}

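/*
 * Probe one VPU core: parse resources, map registers, bring up the
 * mailbox and shared interface, then register with the parent device.
 */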
static int vpu_core_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct vpu_core *core;
        struct vpu_dev *vpu = dev_get_drvdata(dev->parent);
        struct vpu_shared_addr *iface;
        u32 iface_data_size;
        int ret;

        dev_dbg(dev, "probe\n");
        if (!vpu)
                return -EINVAL;
        core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
        if (!core)
                return -ENOMEM;

        core->pdev = pdev;
        core->dev = dev;
        platform_set_drvdata(pdev, core);
        core->vpu = vpu;
        INIT_LIST_HEAD(&core->instances);
        mutex_init(&core->lock);
        mutex_init(&core->cmd_lock);
        init_completion(&core->cmp);
        init_waitqueue_head(&core->ack_wq);
        vpu_core_set_state(core, VPU_CORE_DEINIT);

        core->res = of_device_get_match_data(dev);
        if (!core->res)
                return -ENODEV;

        core->type = core->res->type;
        core->id = of_alias_get_id(dev->of_node, "vpu-core");
        if (core->id < 0) {
                dev_err(dev, "can't get vpu core id\n");
                return core->id;
        }
        dev_info(core->dev, "[%d] = %s\n", core->id, vpu_core_type_desc(core->type));
        ret = vpu_core_parse_dt(core, dev->of_node);
        if (ret)
                return ret;

        core->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(core->base))
                return PTR_ERR(core->base);

        if (!vpu_iface_check_codec(core)) {
                dev_err(core->dev, "codec is not supported\n");
                return -EINVAL;
        }

        ret = vpu_mbox_init(core);
        if (ret)
                return ret;

        iface = devm_kzalloc(dev, sizeof(*iface), GFP_KERNEL);
        if (!iface)
                return -ENOMEM;

        iface_data_size = vpu_iface_get_data_size(core);
        if (iface_data_size) {
                iface->priv = devm_kzalloc(dev, iface_data_size, GFP_KERNEL);
                if (!iface->priv)
                        return -ENOMEM;
        }

        ret = vpu_iface_init(core, iface, &core->rpc, core->fw.phys);
        if (ret) {
                dev_err(core->dev, "failed to init iface, ret = %d\n", ret);
                return ret;
        }

        vpu_iface_config_system(core, vpu->res->mreg_base, vpu->base);
        vpu_iface_set_log_buf(core, &core->log);

        pm_runtime_enable(dev);
        ret = pm_runtime_resume_and_get(dev);
        if (ret) {
                /* pm_runtime_resume_and_get() already dropped the usage count */
                pm_runtime_set_suspended(dev);
                goto err_runtime_disable;
        }

        ret = vpu_core_register(dev->parent, core);
        if (ret)
                goto err_core_register;
        core->parent = dev->parent;

        pm_runtime_put_sync(dev);
        vpu_core_create_dbgfs_file(core);

        return 0;

err_core_register:
        pm_runtime_put_sync(dev);
err_runtime_disable:
        pm_runtime_disable(dev);

        return ret;
}

static void vpu_core_remove(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct vpu_core *core = platform_get_drvdata(pdev);
        int ret;

        vpu_core_remove_dbgfs_file(core);
        ret = pm_runtime_resume_and_get(dev);
        WARN_ON(ret < 0);

        vpu_core_shutdown(core);
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);

        vpu_core_unregister(core->parent, core);
        memunmap(core->fw.virt);
        memunmap(core->rpc.virt);
        mutex_destroy(&core->lock);
        mutex_destroy(&core->cmd_lock);
}

static int __maybe_unused vpu_core_runtime_resume(struct device *dev)
{
        struct vpu_core *core = dev_get_drvdata(dev);

        return vpu_mbox_request(core);
}

static int __maybe_unused vpu_core_runtime_suspend(struct device *dev)
{
        struct vpu_core *core = dev_get_drvdata(dev);

        vpu_mbox_free(core);
        return 0;
}

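/*
 * System sleep support: suspend snapshots the firmware state and stops
 * all message work; resume re-boots or resets the core as needed and
 * requeues the message work.
 */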
static void vpu_core_cancel_work(struct vpu_core *core)
{
        struct vpu_inst *inst = NULL;

        cancel_work_sync(&core->msg_work);
        cancel_delayed_work_sync(&core->msg_delayed_work);

        mutex_lock(&core->lock);
        list_for_each_entry(inst, &core->instances, list)
                cancel_work_sync(&inst->msg_work);
        mutex_unlock(&core->lock);
}

static void vpu_core_resume_work(struct vpu_core *core)
{
        struct vpu_inst *inst = NULL;
        unsigned long delay = msecs_to_jiffies(10);

        queue_work(core->workqueue, &core->msg_work);
        queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);

        mutex_lock(&core->lock);
        list_for_each_entry(inst, &core->instances, list)
                queue_work(inst->workqueue, &inst->msg_work);
        mutex_unlock(&core->lock);
}

static int __maybe_unused vpu_core_resume(struct device *dev)
{
        struct vpu_core *core = dev_get_drvdata(dev);
        int ret = 0;

        mutex_lock(&core->lock);
        pm_runtime_resume_and_get(dev);
        vpu_core_get_vpu(core);

        if (core->request_count) {
                if (!vpu_iface_get_power_state(core))
                        ret = vpu_core_boot(core, false);
                else
                        ret = vpu_core_sw_reset(core);
                if (ret) {
                        dev_err(core->dev, "resume failed\n");
                        vpu_core_set_state(core, VPU_CORE_HANG);
                }
        }
        vpu_core_update_state(core);
        pm_runtime_put_sync(dev);
        mutex_unlock(&core->lock);

        vpu_core_resume_work(core);
        return ret;
}

static int __maybe_unused vpu_core_suspend(struct device *dev)
{
        struct vpu_core *core = dev_get_drvdata(dev);
        int ret = 0;

        mutex_lock(&core->lock);
        if (core->request_count)
                ret = vpu_core_snapshot(core);
        mutex_unlock(&core->lock);
        if (ret)
                return ret;

        vpu_core_cancel_work(core);

        mutex_lock(&core->lock);
        vpu_core_put_vpu(core);
        mutex_unlock(&core->lock);
        return ret;
}

static const struct dev_pm_ops vpu_core_pm_ops = {
        SET_RUNTIME_PM_OPS(vpu_core_runtime_suspend, vpu_core_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(vpu_core_suspend, vpu_core_resume)
};

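/*
 * Per-SoC core resources for the i.MX8Q encoder and decoder. The
 * firmware file names were removed ("deblobbed") in Linux-libre.
 */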
static struct vpu_core_resources imx8q_enc = {
        .type = VPU_CORE_TYPE_ENC,
        .fwname = "/*(DEBLOBBED)*/",
        .stride = 16,
        .max_width = 1920,
        .max_height = 1920,
        .min_width = 64,
        .min_height = 48,
        .step_width = 2,
        .step_height = 2,
        .rpc_size = 0x80000,
        .fwlog_size = 0x80000,
        .act_size = 0xc0000,
};

static struct vpu_core_resources imx8q_dec = {
        .type = VPU_CORE_TYPE_DEC,
        .fwname = "/*(DEBLOBBED)*/",
        .stride = 256,
        .max_width = 8188,
        .max_height = 8188,
        .min_width = 16,
        .min_height = 16,
        .step_width = 1,
        .step_height = 1,
        .rpc_size = 0x80000,
        .fwlog_size = 0x80000,
};

static const struct of_device_id vpu_core_dt_match[] = {
        { .compatible = "nxp,imx8q-vpu-encoder", .data = &imx8q_enc },
        { .compatible = "nxp,imx8q-vpu-decoder", .data = &imx8q_dec },
        {}
};
MODULE_DEVICE_TABLE(of, vpu_core_dt_match);

static struct platform_driver amphion_vpu_core_driver = {
        .probe = vpu_core_probe,
        .remove_new = vpu_core_remove,
        .driver = {
                .name = "amphion-vpu-core",
                .of_match_table = vpu_core_dt_match,
                .pm = &vpu_core_pm_ops,
        },
};

int __init vpu_core_driver_init(void)
{
        return platform_driver_register(&amphion_vpu_core_driver);
}

void __exit vpu_core_driver_exit(void)
{
        platform_driver_unregister(&amphion_vpu_core_driver);
}