// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_core.h"
#include "vpu_mbox.h"
#include "vpu_msgs.h"
#include "vpu_rpc.h"
#include "vpu_cmds.h"

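/* MMIO accessors for the VPU core control/status register block */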
void csr_writel(struct vpu_core *core, u32 reg, u32 val)
{
	writel(val, core->base + reg);
}

u32 csr_readl(struct vpu_core *core, u32 reg)
{
	return readl(core->base + reg);
}

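/*
 * Copy the firmware image into the reserved boot region and notify the
 * interface layer that it has been loaded. The boot region is described
 * by the first "memory-region" phandle parsed in vpu_core_parse_dt().
 * In this linux-libre tree, reject_firmware() stands in for the usual
 * request_firmware() call.
 */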
static int vpu_core_load_firmware(struct vpu_core *core)
{
	const struct firmware *pfw = NULL;
	int ret = 0;

	if (!core->fw.virt) {
		dev_err(core->dev, "firmware buffer is not ready\n");
		return -EINVAL;
	}

	ret = reject_firmware(&pfw, core->res->fwname, core->dev);
	dev_dbg(core->dev, "request_firmware %s : %d\n", core->res->fwname, ret);
	if (ret) {
		dev_err(core->dev, "request firmware %s failed, ret = %d\n",
			core->res->fwname, ret);
		return ret;
	}

	if (core->fw.length < pfw->size) {
		dev_err(core->dev, "firmware buffer too small: need %zu, have %u\n",
			pfw->size, core->fw.length);
		ret = -EINVAL;
		goto exit;
	}

	memset(core->fw.virt, 0, core->fw.length);
	memcpy(core->fw.virt, pfw->data, pfw->size);
	core->fw.bytesused = pfw->size;
	ret = vpu_iface_on_firmware_loaded(core);
exit:
	release_firmware(pfw);
	pfw = NULL;

	return ret;
}

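/*
 * Called once the core reports boot completion: record the firmware
 * version and derive how many instances this core can host. When the
 * platform defines a per-instance "act" buffer size, the count is also
 * capped by how many act slots fit in the reserved region.
 */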
static int vpu_core_boot_done(struct vpu_core *core)
{
	u32 fw_version;

	fw_version = vpu_iface_get_version(core);
	dev_info(core->dev, "%s firmware version : %d.%d.%d\n",
		 vpu_core_type_desc(core->type),
		 (fw_version >> 16) & 0xff,
		 (fw_version >> 8) & 0xff,
		 fw_version & 0xff);
	core->supported_instance_count = vpu_iface_get_max_instance_count(core);
	if (core->res->act_size) {
		u32 count = core->act.length / core->res->act_size;

		core->supported_instance_count = min(core->supported_instance_count, count);
	}
	core->fw_version = fw_version;
	core->state = VPU_CORE_ACTIVE;

	return 0;
}

static int vpu_core_wait_boot_done(struct vpu_core *core)
{
	int ret;

	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "boot timeout\n");
		return -ETIMEDOUT;
	}
	return vpu_core_boot_done(core);
}

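/*
 * Boot the core: optionally (re)load the firmware image, kick the boot
 * through the interface layer, then wait for the boot-done message to
 * complete &core->cmp.
 */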
static int vpu_core_boot(struct vpu_core *core, bool load)
{
	int ret;

	reinit_completion(&core->cmp);
	if (load) {
		ret = vpu_core_load_firmware(core);
		if (ret)
			return ret;
	}

	vpu_iface_boot_core(core);
	return vpu_core_wait_boot_done(core);
}

static int vpu_core_shutdown(struct vpu_core *core)
{
	return vpu_iface_shutdown_core(core);
}

static int vpu_core_restore(struct vpu_core *core)
{
	int ret;

	ret = vpu_core_sw_reset(core);
	if (ret)
		return ret;

	vpu_core_boot_done(core);
	return vpu_iface_restore_core(core);
}

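/*
 * Coherent DMA helpers. Allocations are forced into 32-bit addressable
 * memory (GFP_DMA32), presumably because the firmware only handles
 * 32-bit buffer addresses.
 */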
static int __vpu_alloc_dma(struct device *dev, struct vpu_buffer *buf)
{
	gfp_t gfp = GFP_KERNEL | GFP_DMA32;

	if (!buf->length)
		return 0;

	buf->virt = dma_alloc_coherent(dev, buf->length, &buf->phys, gfp);
	if (!buf->virt)
		return -ENOMEM;

	buf->dev = dev;

	return 0;
}

void vpu_free_dma(struct vpu_buffer *buf)
{
	if (!buf->virt || !buf->dev)
		return;

	dma_free_coherent(buf->dev, buf->length, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
	buf->length = 0;
	buf->bytesused = 0;
	buf->dev = NULL;
}

int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf)
{
	return __vpu_alloc_dma(core->dev, buf);
}

static void vpu_core_check_hang(struct vpu_core *core)
{
	if (core->hang_mask)
		core->state = VPU_CORE_HANG;
}

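/*
 * Pick a core for a new instance: a core still in VPU_CORE_DEINIT is
 * taken immediately (it will be booted on first use); otherwise the
 * active core with the fewest outstanding requests wins.
 */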
static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 type)
{
	struct vpu_core *core = NULL;
	int request_count = INT_MAX;
	struct vpu_core *c;

	list_for_each_entry(c, &vpu->cores, list) {
		dev_dbg(c->dev, "instance_mask = 0x%lx, state = %d\n", c->instance_mask, c->state);
		if (c->type != type)
			continue;
		if (c->state == VPU_CORE_DEINIT) {
			core = c;
			break;
		}
		vpu_core_check_hang(c);
		if (c->state != VPU_CORE_ACTIVE)
			continue;
		if (c->request_count < request_count) {
			request_count = c->request_count;
			core = c;
		}
		if (!request_count)
			break;
	}

	return core;
}

static bool vpu_core_is_exist(struct vpu_dev *vpu, struct vpu_core *core)
{
	struct vpu_core *c;

	list_for_each_entry(c, &vpu->cores, list) {
		if (c == core)
			return true;
	}

	return false;
}

static void vpu_core_get_vpu(struct vpu_core *core)
{
	core->vpu->get_vpu(core->vpu);
	if (core->type == VPU_CORE_TYPE_ENC)
		core->vpu->get_enc(core->vpu);
	if (core->type == VPU_CORE_TYPE_DEC)
		core->vpu->get_dec(core->vpu);
}

static int vpu_core_register(struct device *dev, struct vpu_core *core)
{
	struct vpu_dev *vpu = dev_get_drvdata(dev);
	int ret = 0;

	dev_dbg(core->dev, "register core %s\n", vpu_core_type_desc(core->type));
	if (vpu_core_is_exist(vpu, core))
		return 0;

	core->workqueue = alloc_workqueue("vpu", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!core->workqueue) {
		dev_err(core->dev, "failed to alloc workqueue\n");
		return -ENOMEM;
	}
	INIT_WORK(&core->msg_work, vpu_msg_run_work);
	INIT_DELAYED_WORK(&core->msg_delayed_work, vpu_msg_delayed_work);
	core->msg_buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
	core->msg_buffer = vzalloc(core->msg_buffer_size);
	if (!core->msg_buffer) {
		dev_err(core->dev, "failed to allocate buffer for fifo\n");
		ret = -ENOMEM;
		goto error;
	}
	ret = kfifo_init(&core->msg_fifo, core->msg_buffer, core->msg_buffer_size);
	if (ret) {
		dev_err(core->dev, "failed to init kfifo\n");
		goto error;
	}

	list_add_tail(&core->list, &vpu->cores);

	vpu_core_get_vpu(core);

	if (vpu_iface_get_power_state(core))
		ret = vpu_core_restore(core);
	if (ret)
		goto error;

	return 0;
error:
	if (core->msg_buffer) {
		vfree(core->msg_buffer);
		core->msg_buffer = NULL;
	}
	if (core->workqueue) {
		destroy_workqueue(core->workqueue);
		core->workqueue = NULL;
	}
	return ret;
}

static void vpu_core_put_vpu(struct vpu_core *core)
{
	if (core->type == VPU_CORE_TYPE_ENC)
		core->vpu->put_enc(core->vpu);
	if (core->type == VPU_CORE_TYPE_DEC)
		core->vpu->put_dec(core->vpu);
	core->vpu->put_vpu(core->vpu);
}

static int vpu_core_unregister(struct device *dev, struct vpu_core *core)
{
	list_del_init(&core->list);

	vpu_core_put_vpu(core);
	core->vpu = NULL;
	vfree(core->msg_buffer);
	core->msg_buffer = NULL;

	if (core->workqueue) {
		cancel_work_sync(&core->msg_work);
		cancel_delayed_work_sync(&core->msg_delayed_work);
		destroy_workqueue(core->workqueue);
		core->workqueue = NULL;
	}

	return 0;
}

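/*
 * Instance ids form a bitmap in core->instance_mask: ffz() finds the
 * first free slot, bounded by the instance count probed at boot.
 */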
static int vpu_core_acquire_instance(struct vpu_core *core)
{
	int id;

	id = ffz(core->instance_mask);
	if (id >= core->supported_instance_count)
		return -EINVAL;

	set_bit(id, &core->instance_mask);

	return id;
}

static void vpu_core_release_instance(struct vpu_core *core, int id)
{
	if (id < 0 || id >= core->supported_instance_count)
		return;

	clear_bit(id, &core->instance_mask);
}

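/*
 * Instances are reference counted; the last vpu_inst_put() invokes the
 * instance's release() callback.
 */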
struct vpu_inst *vpu_inst_get(struct vpu_inst *inst)
{
	if (!inst)
		return NULL;

	atomic_inc(&inst->ref_count);

	return inst;
}

void vpu_inst_put(struct vpu_inst *inst)
{
	if (!inst)
		return;
	if (atomic_dec_and_test(&inst->ref_count)) {
		if (inst->release)
			inst->release(inst);
	}
}

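/*
 * Hand out a core of the requested type, booting it first if it is
 * still in VPU_CORE_DEINIT. Each successful call bumps request_count
 * and must be balanced by vpu_release_core().
 *
 * Typical usage (sketch):
 *
 *	core = vpu_request_core(vpu, VPU_CORE_TYPE_DEC);
 *	if (core) {
 *		...
 *		vpu_release_core(core);
 *	}
 */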
struct vpu_core *vpu_request_core(struct vpu_dev *vpu, enum vpu_core_type type)
{
	struct vpu_core *core = NULL;
	int ret;

	mutex_lock(&vpu->lock);

	core = vpu_core_find_proper_by_type(vpu, type);
	if (!core)
		goto exit;

	mutex_lock(&core->lock);
	pm_runtime_resume_and_get(core->dev);

	if (core->state == VPU_CORE_DEINIT) {
		ret = vpu_core_boot(core, true);
		if (ret) {
			pm_runtime_put_sync(core->dev);
			mutex_unlock(&core->lock);
			core = NULL;
			goto exit;
		}
	}

	core->request_count++;

	mutex_unlock(&core->lock);
exit:
	mutex_unlock(&vpu->lock);

	return core;
}

void vpu_release_core(struct vpu_core *core)
{
	if (!core)
		return;

	mutex_lock(&core->lock);
	pm_runtime_put_sync(core->dev);
	if (core->request_count)
		core->request_count--;
	mutex_unlock(&core->lock);
}

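/*
 * Bind an instance to a core (requesting one on demand), allocate an
 * instance id, and carve the instance's slice out of the core's "act"
 * buffer when the platform defines one.
 */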
int vpu_inst_register(struct vpu_inst *inst)
{
	struct vpu_dev *vpu;
	struct vpu_core *core;
	int ret = 0;

	vpu = inst->vpu;
	core = inst->core;
	if (!core) {
		core = vpu_request_core(vpu, inst->type);
		if (!core) {
			dev_err(vpu->dev, "there is no vpu core for %s\n",
				vpu_core_type_desc(inst->type));
			return -EINVAL;
		}
		inst->core = core;
		inst->dev = get_device(core->dev);
	}

	mutex_lock(&core->lock);
	if (inst->id >= 0 && inst->id < core->supported_instance_count)
		goto exit;

	ret = vpu_core_acquire_instance(core);
	if (ret < 0)
		goto exit;

	vpu_trace(inst->dev, "[%d] %p\n", ret, inst);
	inst->id = ret;
	list_add_tail(&inst->list, &core->instances);
	ret = 0;
	if (core->res->act_size) {
		inst->act.phys = core->act.phys + core->res->act_size * inst->id;
		inst->act.virt = core->act.virt + core->res->act_size * inst->id;
		inst->act.length = core->res->act_size;
	}
	vpu_inst_create_dbgfs_file(inst);
exit:
	mutex_unlock(&core->lock);

	if (ret)
		dev_err(core->dev, "failed to register instance\n");
	return ret;
}

int vpu_inst_unregister(struct vpu_inst *inst)
{
	struct vpu_core *core;

	if (!inst->core)
		return 0;

	core = inst->core;
	vpu_clear_request(inst);
	mutex_lock(&core->lock);
	if (inst->id >= 0 && inst->id < core->supported_instance_count) {
		vpu_inst_remove_dbgfs_file(inst);
		list_del_init(&inst->list);
		vpu_core_release_instance(core, inst->id);
		inst->id = VPU_INST_NULL_ID;
	}
	vpu_core_check_hang(core);
	if (core->state == VPU_CORE_HANG && !core->instance_mask) {
		dev_info(core->dev, "reset hang core\n");
		if (!vpu_core_sw_reset(core)) {
			core->state = VPU_CORE_ACTIVE;
			core->hang_mask = 0;
		}
	}
	mutex_unlock(&core->lock);

	return 0;
}

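/*
 * Look up a registered instance by id and take a reference on it; the
 * caller drops the reference with vpu_inst_put().
 */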
struct vpu_inst *vpu_core_find_instance(struct vpu_core *core, u32 index)
{
	struct vpu_inst *inst = NULL;
	struct vpu_inst *tmp;

	mutex_lock(&core->lock);
	if (index >= core->supported_instance_count || !test_bit(index, &core->instance_mask))
		goto exit;
	list_for_each_entry(tmp, &core->instances, list) {
		if (tmp->id == index) {
			inst = vpu_inst_get(tmp);
			break;
		}
	}
exit:
	mutex_unlock(&core->lock);

	return inst;
}

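/*
 * Resolve the resource template for an instance; when the instance is
 * not yet bound to a core, fall back to the first registered core of
 * the matching type.
 */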
const struct vpu_core_resources *vpu_get_resource(struct vpu_inst *inst)
{
	struct vpu_dev *vpu;
	struct vpu_core *core = NULL;
	const struct vpu_core_resources *res = NULL;

	if (!inst || !inst->vpu)
		return NULL;

	if (inst->core && inst->core->res)
		return inst->core->res;

	vpu = inst->vpu;
	mutex_lock(&vpu->lock);
	list_for_each_entry(core, &vpu->cores, list) {
		if (core->type == inst->type) {
			res = core->res;
			break;
		}
	}
	mutex_unlock(&vpu->lock);

	return res;
}

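/*
 * Parse the two required "memory-region" phandles: region 0 holds the
 * firmware boot image, region 1 is split into the RPC shared buffer,
 * the firmware log buffer, and the per-instance "act" buffers, in that
 * order.
 */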
static int vpu_core_parse_dt(struct vpu_core *core, struct device_node *np)
{
	struct device_node *node;
	struct resource res;
	int ret;

	if (of_count_phandle_with_args(np, "memory-region", NULL) < 2) {
		dev_err(core->dev, "need 2 memory-region for boot and rpc\n");
		return -ENODEV;
	}

	node = of_parse_phandle(np, "memory-region", 0);
	if (!node) {
		dev_err(core->dev, "boot-region of_parse_phandle error\n");
		return -ENODEV;
	}
	if (of_address_to_resource(node, 0, &res)) {
		dev_err(core->dev, "boot-region of_address_to_resource error\n");
		of_node_put(node);
		return -EINVAL;
	}
	core->fw.phys = res.start;
	core->fw.length = resource_size(&res);

	of_node_put(node);

	node = of_parse_phandle(np, "memory-region", 1);
	if (!node) {
		dev_err(core->dev, "rpc-region of_parse_phandle error\n");
		return -ENODEV;
	}
	if (of_address_to_resource(node, 0, &res)) {
		dev_err(core->dev, "rpc-region of_address_to_resource error\n");
		of_node_put(node);
		return -EINVAL;
	}
	core->rpc.phys = res.start;
	core->rpc.length = resource_size(&res);

	if (core->rpc.length < core->res->rpc_size + core->res->fwlog_size) {
		dev_err(core->dev, "the rpc-region <%pad, 0x%x> is too small\n",
			&core->rpc.phys, core->rpc.length);
		of_node_put(node);
		return -EINVAL;
	}

	core->fw.virt = memremap(core->fw.phys, core->fw.length, MEMREMAP_WC);
	if (!core->fw.virt) {
		dev_err(core->dev, "failed to remap fw region\n");
		of_node_put(node);
		return -ENOMEM;
	}
	core->rpc.virt = memremap(core->rpc.phys, core->rpc.length, MEMREMAP_WC);
	if (!core->rpc.virt) {
		dev_err(core->dev, "failed to remap rpc region\n");
		memunmap(core->fw.virt);
		core->fw.virt = NULL;
		of_node_put(node);
		return -ENOMEM;
	}
	memset(core->rpc.virt, 0, core->rpc.length);

	ret = vpu_iface_check_memory_region(core, core->rpc.phys, core->rpc.length);
	if (ret != VPU_CORE_MEMORY_UNCACHED) {
		dev_err(core->dev, "rpc region<%pad, 0x%x> isn't uncached\n",
			&core->rpc.phys, core->rpc.length);
		memunmap(core->rpc.virt);
		memunmap(core->fw.virt);
		core->rpc.virt = NULL;
		core->fw.virt = NULL;
		of_node_put(node);
		return -EINVAL;
	}

	core->log.phys = core->rpc.phys + core->res->rpc_size;
	core->log.virt = core->rpc.virt + core->res->rpc_size;
	core->log.length = core->res->fwlog_size;
	core->act.phys = core->log.phys + core->log.length;
	core->act.virt = core->log.virt + core->log.length;
	core->act.length = core->rpc.length - core->res->rpc_size - core->log.length;
	core->rpc.length = core->res->rpc_size;

	of_node_put(node);

	return 0;
}

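/*
 * Probe one encoder or decoder core: parse the DT resources, map the
 * registers, set up the mailbox and the shared interface memory, then
 * register the core with the parent vpu device.
 */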
static int vpu_core_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vpu_core *core;
	struct vpu_dev *vpu = dev_get_drvdata(dev->parent);
	struct vpu_shared_addr *iface;
	u32 iface_data_size;
	int ret;

	dev_dbg(dev, "probe\n");
	if (!vpu)
		return -EINVAL;
	core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
	if (!core)
		return -ENOMEM;

	core->pdev = pdev;
	core->dev = dev;
	platform_set_drvdata(pdev, core);
	core->vpu = vpu;
	INIT_LIST_HEAD(&core->instances);
	mutex_init(&core->lock);
	mutex_init(&core->cmd_lock);
	init_completion(&core->cmp);
	init_waitqueue_head(&core->ack_wq);
	core->state = VPU_CORE_DEINIT;

	core->res = of_device_get_match_data(dev);
	if (!core->res)
		return -ENODEV;

	core->type = core->res->type;
	core->id = of_alias_get_id(dev->of_node, "vpu_core");
	if (core->id < 0) {
		dev_err(dev, "can't get vpu core id\n");
		return core->id;
	}
	dev_info(core->dev, "[%d] = %s\n", core->id, vpu_core_type_desc(core->type));
	ret = vpu_core_parse_dt(core, dev->of_node);
	if (ret)
		return ret;

	core->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(core->base))
		return PTR_ERR(core->base);

	if (!vpu_iface_check_codec(core)) {
		dev_err(core->dev, "codec type is not supported\n");
		return -EINVAL;
	}

	ret = vpu_mbox_init(core);
	if (ret)
		return ret;

	iface = devm_kzalloc(dev, sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	iface_data_size = vpu_iface_get_data_size(core);
	if (iface_data_size) {
		iface->priv = devm_kzalloc(dev, iface_data_size, GFP_KERNEL);
		if (!iface->priv)
			return -ENOMEM;
	}

	ret = vpu_iface_init(core, iface, &core->rpc, core->fw.phys);
	if (ret) {
		dev_err(core->dev, "init iface fail, ret = %d\n", ret);
		return ret;
	}

	vpu_iface_config_system(core, vpu->res->mreg_base, vpu->base);
	vpu_iface_set_log_buf(core, &core->log);

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret) {
		/*
		 * pm_runtime_resume_and_get() already drops the usage
		 * count on failure, so no extra put is needed here.
		 */
		pm_runtime_set_suspended(dev);
		goto err_runtime_disable;
	}

	ret = vpu_core_register(dev->parent, core);
	if (ret)
		goto err_core_register;
	core->parent = dev->parent;

	pm_runtime_put_sync(dev);
	vpu_core_create_dbgfs_file(core);

	return 0;

err_core_register:
	pm_runtime_put_sync(dev);
err_runtime_disable:
	pm_runtime_disable(dev);

	return ret;
}

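/*
 * Tear-down mirrors probe: shut the firmware down with the core
 * resumed, unregister from the parent, then unmap the reserved regions.
 */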
static int vpu_core_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vpu_core *core = platform_get_drvdata(pdev);
	int ret;

	vpu_core_remove_dbgfs_file(core);
	ret = pm_runtime_resume_and_get(dev);
	WARN_ON(ret < 0);

	vpu_core_shutdown(core);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	vpu_core_unregister(core->parent, core);
	memunmap(core->fw.virt);
	memunmap(core->rpc.virt);
	mutex_destroy(&core->lock);
	mutex_destroy(&core->cmd_lock);

	return 0;
}

static int __maybe_unused vpu_core_runtime_resume(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);

	return vpu_mbox_request(core);
}

static int __maybe_unused vpu_core_runtime_suspend(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);

	vpu_mbox_free(core);
	return 0;
}

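/*
 * Quiesce (or requeue) all message handling for the core and its
 * instances around system sleep transitions.
 */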
static void vpu_core_cancel_work(struct vpu_core *core)
{
	struct vpu_inst *inst = NULL;

	cancel_work_sync(&core->msg_work);
	cancel_delayed_work_sync(&core->msg_delayed_work);

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list)
		cancel_work_sync(&inst->msg_work);
	mutex_unlock(&core->lock);
}

static void vpu_core_resume_work(struct vpu_core *core)
{
	struct vpu_inst *inst = NULL;
	unsigned long delay = msecs_to_jiffies(10);

	queue_work(core->workqueue, &core->msg_work);
	queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list)
		queue_work(inst->workqueue, &inst->msg_work);
	mutex_unlock(&core->lock);
}

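/*
 * System resume: if the core was snapshotted on suspend, either boot
 * the firmware again (power was lost) or software-reset it (power was
 * retained), then restart message handling.
 */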
static int __maybe_unused vpu_core_resume(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&core->lock);
	pm_runtime_resume_and_get(dev);
	vpu_core_get_vpu(core);
	if (core->state != VPU_CORE_SNAPSHOT)
		goto exit;

	if (!vpu_iface_get_power_state(core)) {
		if (!list_empty(&core->instances)) {
			ret = vpu_core_boot(core, false);
			if (ret) {
				dev_err(core->dev, "%s boot fail\n", __func__);
				core->state = VPU_CORE_DEINIT;
				goto exit;
			}
		} else {
			core->state = VPU_CORE_DEINIT;
		}
	} else {
		if (!list_empty(&core->instances)) {
			ret = vpu_core_sw_reset(core);
			if (ret) {
				dev_err(core->dev, "%s sw_reset fail\n", __func__);
				core->state = VPU_CORE_HANG;
				goto exit;
			}
		}
		core->state = VPU_CORE_ACTIVE;
	}

exit:
	pm_runtime_put_sync(dev);
	mutex_unlock(&core->lock);

	vpu_core_resume_work(core);
	return ret;
}

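/*
 * System suspend: snapshot the firmware state of an active core so it
 * can be restored on resume, then stop all message work.
 */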
static int __maybe_unused vpu_core_suspend(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&core->lock);
	if (core->state == VPU_CORE_ACTIVE) {
		if (!list_empty(&core->instances)) {
			ret = vpu_core_snapshot(core);
			if (ret) {
				mutex_unlock(&core->lock);
				return ret;
			}
		}

		core->state = VPU_CORE_SNAPSHOT;
	}
	mutex_unlock(&core->lock);

	vpu_core_cancel_work(core);

	mutex_lock(&core->lock);
	vpu_core_put_vpu(core);
	mutex_unlock(&core->lock);
	return ret;
}

static const struct dev_pm_ops vpu_core_pm_ops = {
	SET_RUNTIME_PM_OPS(vpu_core_runtime_suspend, vpu_core_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(vpu_core_suspend, vpu_core_resume)
};

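/*
 * Per-SoC resource templates for the i.MX8Q encoder and decoder cores.
 * (The firmware file names are deblobbed in this linux-libre tree.)
 */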
static struct vpu_core_resources imx8q_enc = {
	.type = VPU_CORE_TYPE_ENC,
	.fwname = "/*(DEBLOBBED)*/",
	.stride = 16,
	.max_width = 1920,
	.max_height = 1920,
	.min_width = 64,
	.min_height = 48,
	.step_width = 2,
	.step_height = 2,
	.rpc_size = 0x80000,
	.fwlog_size = 0x80000,
	.act_size = 0xc0000,
};

static struct vpu_core_resources imx8q_dec = {
	.type = VPU_CORE_TYPE_DEC,
	.fwname = "/*(DEBLOBBED)*/",
	.stride = 256,
	.max_width = 8188,
	.max_height = 8188,
	.min_width = 16,
	.min_height = 16,
	.step_width = 1,
	.step_height = 1,
	.rpc_size = 0x80000,
	.fwlog_size = 0x80000,
};

static const struct of_device_id vpu_core_dt_match[] = {
	{ .compatible = "nxp,imx8q-vpu-encoder", .data = &imx8q_enc },
	{ .compatible = "nxp,imx8q-vpu-decoder", .data = &imx8q_dec },
	{}
};
MODULE_DEVICE_TABLE(of, vpu_core_dt_match);

static struct platform_driver amphion_vpu_core_driver = {
	.probe = vpu_core_probe,
	.remove = vpu_core_remove,
	.driver = {
		.name = "amphion-vpu-core",
		.of_match_table = vpu_core_dt_match,
		.pm = &vpu_core_pm_ops,
	},
};

int __init vpu_core_driver_init(void)
{
	return platform_driver_register(&amphion_vpu_core_driver);
}

void __exit vpu_core_driver_exit(void)
{
	platform_driver_unregister(&amphion_vpu_core_driver);
}