GNU Linux-libre 5.19-rc6-gnu (releases.git): drivers/media/platform/qcom/venus/hfi_venus.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2017 Linaro Ltd.
5  */
6
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14
15 #include "core.h"
16 #include "hfi_cmds.h"
17 #include "hfi_msgs.h"
18 #include "hfi_venus.h"
19 #include "hfi_venus_io.h"
20 #include "firmware.h"
21
22 #define HFI_MASK_QHDR_TX_TYPE           0xff000000
23 #define HFI_MASK_QHDR_RX_TYPE           0x00ff0000
24 #define HFI_MASK_QHDR_PRI_TYPE          0x0000ff00
25 #define HFI_MASK_QHDR_ID_TYPE           0x000000ff
26
27 #define HFI_HOST_TO_CTRL_CMD_Q          0
28 #define HFI_CTRL_TO_HOST_MSG_Q          1
29 #define HFI_CTRL_TO_HOST_DBG_Q          2
30 #define HFI_MASK_QHDR_STATUS            0x000000ff
31
32 #define IFACEQ_NUM                      3
33 #define IFACEQ_CMD_IDX                  0
34 #define IFACEQ_MSG_IDX                  1
35 #define IFACEQ_DBG_IDX                  2
36 #define IFACEQ_MAX_BUF_COUNT            50
37 #define IFACEQ_MAX_PARALLEL_CLNTS       16
38 #define IFACEQ_DFLT_QHDR                0x01010000
39
40 #define POLL_INTERVAL_US                50
41
42 #define IFACEQ_MAX_PKT_SIZE             1024
43 #define IFACEQ_MED_PKT_SIZE             768
44 #define IFACEQ_MIN_PKT_SIZE             8
45 #define IFACEQ_VAR_SMALL_PKT_SIZE       100
46 #define IFACEQ_VAR_LARGE_PKT_SIZE       512
47 #define IFACEQ_VAR_HUGE_PKT_SIZE        (1024 * 12)
48
49 struct hfi_queue_table_header {
50         u32 version;
51         u32 size;
52         u32 qhdr0_offset;
53         u32 qhdr_size;
54         u32 num_q;
55         u32 num_active_q;
56 };
57
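/*
 * Layout note: this queue header is shared with the firmware. The
 * read_idx, write_idx and q_size fields are counted in dwords (32-bit
 * words) rather than bytes: venus_read_queue() and venus_write_queue()
 * shift the indices left by 2 to form byte offsets, and
 * venus_set_qhdr_defaults() sets q_size to IFACEQ_QUEUE_SIZE / 4.
 */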
58 struct hfi_queue_header {
59         u32 status;
60         u32 start_addr;
61         u32 type;
62         u32 q_size;
63         u32 pkt_size;
64         u32 pkt_drop_cnt;
65         u32 rx_wm;
66         u32 tx_wm;
67         u32 rx_req;
68         u32 tx_req;
69         u32 rx_irq_status;
70         u32 tx_irq_status;
71         u32 read_idx;
72         u32 write_idx;
73 };
74
75 #define IFACEQ_TABLE_SIZE       \
76         (sizeof(struct hfi_queue_table_header) +        \
77          sizeof(struct hfi_queue_header) * IFACEQ_NUM)
78
79 #define IFACEQ_QUEUE_SIZE       (IFACEQ_MAX_PKT_SIZE *  \
80         IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)
81
82 #define IFACEQ_GET_QHDR_START_ADDR(ptr, i)      \
83         (void *)(((ptr) + sizeof(struct hfi_queue_table_header)) +      \
84                 ((i) * sizeof(struct hfi_queue_header)))
85
86 #define QDSS_SIZE               SZ_4K
87 #define SFR_SIZE                SZ_4K
88 #define QUEUE_SIZE              \
89         (IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))
90
91 #define ALIGNED_QDSS_SIZE       ALIGN(QDSS_SIZE, SZ_4K)
92 #define ALIGNED_SFR_SIZE        ALIGN(SFR_SIZE, SZ_4K)
93 #define ALIGNED_QUEUE_SIZE      ALIGN(QUEUE_SIZE, SZ_4K)
94 #define SHARED_QSIZE            ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
95                                       ALIGNED_QDSS_SIZE, SZ_1M)
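/*
 * Worked out, the sizing above gives: IFACEQ_QUEUE_SIZE = 1024 * 50 * 16
 * bytes = 800 KiB per ring, so QUEUE_SIZE is the queue table plus three
 * such rings (a little over 2.3 MiB). The SFR, queue and QDSS regions are
 * each rounded up to 4 KiB, and SHARED_QSIZE rounds their sum up to a
 * 1 MiB boundary before it is programmed as the UC region size in
 * venus_run().
 */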
96
97 struct mem_desc {
98         dma_addr_t da;  /* device address */
99         void *kva;      /* kernel virtual address */
100         u32 size;
101         unsigned long attrs;
102 };
103
104 struct iface_queue {
105         struct hfi_queue_header *qhdr;
106         struct mem_desc qmem;
107 };
108
109 enum venus_state {
110         VENUS_STATE_DEINIT = 1,
111         VENUS_STATE_INIT,
112 };
113
114 struct venus_hfi_device {
115         struct venus_core *core;
116         u32 irq_status;
117         u32 last_packet_type;
118         bool power_enabled;
119         bool suspended;
120         enum venus_state state;
121         /* serialize read / write to the shared memory */
122         struct mutex lock;
123         struct completion pwr_collapse_prep;
124         struct completion release_resource;
125         struct mem_desc ifaceq_table;
126         struct mem_desc sfr;
127         struct iface_queue queues[IFACEQ_NUM];
128         u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
129         u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
130 };
131
132 static bool venus_pkt_debug;
133 int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
134 static bool venus_sys_idle_indicator;
135 static bool venus_fw_low_power_mode = true;
136 static int venus_hw_rsp_timeout = 1000;
137 static bool venus_fw_coverage;
138
139 static void venus_set_state(struct venus_hfi_device *hdev,
140                             enum venus_state state)
141 {
142         mutex_lock(&hdev->lock);
143         hdev->state = state;
144         mutex_unlock(&hdev->lock);
145 }
146
147 static bool venus_is_valid_state(struct venus_hfi_device *hdev)
148 {
149         return hdev->state != VENUS_STATE_DEINIT;
150 }
151
152 static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
153 {
154         size_t pkt_size = *(u32 *)packet;
155
156         if (!venus_pkt_debug)
157                 return;
158
159         print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
160                        pkt_size, true);
161 }
162
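/*
 * Copy one packet into a host-to-firmware ring. The first dword of an HFI
 * packet holds its total size in bytes, which is what the dwords
 * calculation below relies on. If the ring does not have enough free
 * space, tx_req is raised in the queue header (as a hint to the firmware
 * side) and -ENOSPC is returned. On success *rx_req reports whether the
 * firmware asked to be signalled about new data, so the caller knows
 * whether to ring the doorbell via venus_soft_int().
 */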
163 static int venus_write_queue(struct venus_hfi_device *hdev,
164                              struct iface_queue *queue,
165                              void *packet, u32 *rx_req)
166 {
167         struct hfi_queue_header *qhdr;
168         u32 dwords, new_wr_idx;
169         u32 empty_space, rd_idx, wr_idx, qsize;
170         u32 *wr_ptr;
171
172         if (!queue->qmem.kva)
173                 return -EINVAL;
174
175         qhdr = queue->qhdr;
176         if (!qhdr)
177                 return -EINVAL;
178
179         venus_dump_packet(hdev, packet);
180
181         dwords = (*(u32 *)packet) >> 2;
182         if (!dwords)
183                 return -EINVAL;
184
185         rd_idx = qhdr->read_idx;
186         wr_idx = qhdr->write_idx;
187         qsize = qhdr->q_size;
188         /* ensure rd/wr indices's are read from memory */
189         rmb();
190
191         if (wr_idx >= rd_idx)
192                 empty_space = qsize - (wr_idx - rd_idx);
193         else
194                 empty_space = rd_idx - wr_idx;
195
196         if (empty_space <= dwords) {
197                 qhdr->tx_req = 1;
198                 /* ensure tx_req is updated in memory */
199                 wmb();
200                 return -ENOSPC;
201         }
202
203         qhdr->tx_req = 0;
204         /* ensure tx_req is updated in memory */
205         wmb();
206
207         new_wr_idx = wr_idx + dwords;
208         wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
209         if (new_wr_idx < qsize) {
210                 memcpy(wr_ptr, packet, dwords << 2);
211         } else {
212                 size_t len;
213
214                 new_wr_idx -= qsize;
215                 len = (dwords - new_wr_idx) << 2;
216                 memcpy(wr_ptr, packet, len);
217                 memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
218         }
219
220         /* make sure packet is written before updating the write index */
221         wmb();
222
223         qhdr->write_idx = new_wr_idx;
224         *rx_req = qhdr->rx_req ? 1 : 0;
225
226         /* make sure write index is updated before an interrupt is raised */
227         mb();
228
229         return 0;
230 }
231
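/*
 * Pop one packet from a firmware-to-host ring into pkt, which is expected
 * to be at least IFACEQ_VAR_HUGE_PKT_SIZE bytes (the size check below
 * assumes that). An empty ring returns -ENODATA; a packet with a bogus
 * size is dropped by snapping read_idx to write_idx and -EBADMSG is
 * returned. rx_req is only re-armed for the message queue (see the comment
 * on the debug queue below), and *tx_req is returned so the caller can
 * kick the firmware if it requested a doorbell.
 */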
232 static int venus_read_queue(struct venus_hfi_device *hdev,
233                             struct iface_queue *queue, void *pkt, u32 *tx_req)
234 {
235         struct hfi_queue_header *qhdr;
236         u32 dwords, new_rd_idx;
237         u32 rd_idx, wr_idx, type, qsize;
238         u32 *rd_ptr;
239         u32 recv_request = 0;
240         int ret = 0;
241
242         if (!queue->qmem.kva)
243                 return -EINVAL;
244
245         qhdr = queue->qhdr;
246         if (!qhdr)
247                 return -EINVAL;
248
249         type = qhdr->type;
250         rd_idx = qhdr->read_idx;
251         wr_idx = qhdr->write_idx;
252         qsize = qhdr->q_size;
253
254         /* make sure data is valid before using it */
255         rmb();
256
257         /*
258          * Do not set the receive request for the debug queue; if it is set,
259          * Venus generates an interrupt for debug messages even when no
260          * response message is available. In general the debug queue does not
261          * become full, as it is emptied out on every interrupt from Venus,
262          * and Venus generates an interrupt anyway if it does fill up.
263          */
264         if (type & HFI_CTRL_TO_HOST_MSG_Q)
265                 recv_request = 1;
266
267         if (rd_idx == wr_idx) {
268                 qhdr->rx_req = recv_request;
269                 *tx_req = 0;
270                 /* update rx_req field in memory */
271                 wmb();
272                 return -ENODATA;
273         }
274
275         rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
276         dwords = *rd_ptr >> 2;
277         if (!dwords)
278                 return -EINVAL;
279
280         new_rd_idx = rd_idx + dwords;
281         if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
282                 if (new_rd_idx < qsize) {
283                         memcpy(pkt, rd_ptr, dwords << 2);
284                 } else {
285                         size_t len;
286
287                         new_rd_idx -= qsize;
288                         len = (dwords - new_rd_idx) << 2;
289                         memcpy(pkt, rd_ptr, len);
290                         memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
291                 }
292         } else {
293                 /* bad packet received, dropping */
294                 new_rd_idx = qhdr->write_idx;
295                 ret = -EBADMSG;
296         }
297
298         /* ensure the packet is read before updating the read index */
299         rmb();
300
301         qhdr->read_idx = new_rd_idx;
302         /* ensure the updated read index is written to memory */
303         wmb();
304
305         rd_idx = qhdr->read_idx;
306         wr_idx = qhdr->write_idx;
307         /* ensure rd/wr indices are read from memory */
308         rmb();
309
310         if (rd_idx != wr_idx)
311                 qhdr->rx_req = 0;
312         else
313                 qhdr->rx_req = recv_request;
314
315         *tx_req = qhdr->tx_req ? 1 : 0;
316
317         /* ensure rx_req is stored to memory and tx_req is loaded from memory */
318         mb();
319
320         venus_dump_packet(hdev, pkt);
321
322         return ret;
323 }
324
325 static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
326                        u32 size)
327 {
328         struct device *dev = hdev->core->dev;
329
330         desc->attrs = DMA_ATTR_WRITE_COMBINE;
331         desc->size = ALIGN(size, SZ_4K);
332
333         desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
334                                     desc->attrs);
335         if (!desc->kva)
336                 return -ENOMEM;
337
338         return 0;
339 }
340
341 static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
342 {
343         struct device *dev = hdev->core->dev;
344
345         dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
346 }
347
348 static void venus_set_registers(struct venus_hfi_device *hdev)
349 {
350         const struct venus_resources *res = hdev->core->res;
351         const struct reg_val *tbl = res->reg_tbl;
352         unsigned int count = res->reg_tbl_size;
353         unsigned int i;
354
355         for (i = 0; i < count; i++)
356                 writel(tbl[i].value, hdev->core->base + tbl[i].reg);
357 }
358
359 static void venus_soft_int(struct venus_hfi_device *hdev)
360 {
361         void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
362         u32 clear_bit;
363
364         if (IS_V6(hdev->core))
365                 clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
366         else
367                 clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
368
369         writel(clear_bit, cpu_ic_base + CPU_IC_SOFTINT);
370 }
371
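/*
 * Queue a command packet and, if the firmware asked for it (rx_req
 * returned by venus_write_queue()), ring the host-to-firmware soft
 * interrupt. For synchronous commands the message queue's rx_req is also
 * set so that the firmware raises an interrupt when the reply arrives.
 * Callers are expected to hold hdev->lock (hence the _nolock suffix);
 * venus_iface_cmdq_write() below is the locking wrapper.
 */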
372 static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
373                                          void *pkt, bool sync)
374 {
375         struct device *dev = hdev->core->dev;
376         struct hfi_pkt_hdr *cmd_packet;
377         struct iface_queue *queue;
378         u32 rx_req;
379         int ret;
380
381         if (!venus_is_valid_state(hdev))
382                 return -EINVAL;
383
384         cmd_packet = (struct hfi_pkt_hdr *)pkt;
385         hdev->last_packet_type = cmd_packet->pkt_type;
386
387         queue = &hdev->queues[IFACEQ_CMD_IDX];
388
389         ret = venus_write_queue(hdev, queue, pkt, &rx_req);
390         if (ret) {
391                 dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
392                 return ret;
393         }
394
395         if (sync) {
396                 /*
397                  * Inform the video hardware to raise an interrupt for
398                  * synchronous commands
399                  */
400                 queue = &hdev->queues[IFACEQ_MSG_IDX];
401                 queue->qhdr->rx_req = 1;
402                 /* ensure rx_req is updated in memory */
403                 wmb();
404         }
405
406         if (rx_req)
407                 venus_soft_int(hdev);
408
409         return 0;
410 }
411
412 static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync)
413 {
414         int ret;
415
416         mutex_lock(&hdev->lock);
417         ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync);
418         mutex_unlock(&hdev->lock);
419
420         return ret;
421 }
422
423 static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
424                                        u32 size, u32 addr, void *cookie)
425 {
426         struct venus_hfi_device *hdev = to_hfi_priv(core);
427         struct hfi_sys_set_resource_pkt *pkt;
428         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
429         int ret;
430
431         if (id == VIDC_RESOURCE_NONE)
432                 return 0;
433
434         pkt = (struct hfi_sys_set_resource_pkt *)packet;
435
436         ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
437         if (ret)
438                 return ret;
439
440         ret = venus_iface_cmdq_write(hdev, pkt, false);
441         if (ret)
442                 return ret;
443
444         return 0;
445 }
446
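/*
 * Bring the firmware out of reset: set the CTRL_INIT bit, program the
 * wrapper interrupt mask, then poll CPU_CS_SCIACMDARG0 until the firmware
 * reports a non-zero control status (at most 100 iterations of 0.5-1 ms
 * each, i.e. roughly 50-100 ms). An error status of 4 is treated as a bad
 * UC_REGION setting and fails with -EINVAL; never seeing a non-zero status
 * fails with -ETIMEDOUT.
 */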
447 static int venus_boot_core(struct venus_hfi_device *hdev)
448 {
449         struct device *dev = hdev->core->dev;
450         static const unsigned int max_tries = 100;
451         u32 ctrl_status = 0, mask_val;
452         unsigned int count = 0;
453         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
454         void __iomem *wrapper_base = hdev->core->wrapper_base;
455         int ret = 0;
456
457         writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
458         if (IS_V6(hdev->core)) {
459                 mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
460                 mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
461                               WRAPPER_INTR_MASK_A2HCPU_MASK);
462         } else {
463                 mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
464         }
465         writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
466         writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
467
468         while (!ctrl_status && count < max_tries) {
469                 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
470                 if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
471                         dev_err(dev, "invalid setting for UC_REGION\n");
472                         ret = -EINVAL;
473                         break;
474                 }
475
476                 usleep_range(500, 1000);
477                 count++;
478         }
479
480         if (count >= max_tries)
481                 ret = -ETIMEDOUT;
482
483         if (IS_V6(hdev->core)) {
484                 writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
485                 writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
486         }
487
488         return ret;
489 }
490
491 static u32 venus_hwversion(struct venus_hfi_device *hdev)
492 {
493         struct device *dev = hdev->core->dev;
494         void __iomem *wrapper_base = hdev->core->wrapper_base;
495         u32 ver;
496         u32 major, minor, step;
497
498         ver = readl(wrapper_base + WRAPPER_HW_VERSION);
499         major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
500         major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
501         minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
502         minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
503         step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
504
505         dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
506
507         return major;
508 }
509
510 static int venus_run(struct venus_hfi_device *hdev)
511 {
512         struct device *dev = hdev->core->dev;
513         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
514         int ret;
515
516         /*
517          * Re-program all of the registers that get reset as a result of
518          * regulator_disable() and _enable()
519          */
520         venus_set_registers(hdev);
521
522         writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
523         writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
524         writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
525         writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
526         if (hdev->sfr.da)
527                 writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);
528
529         ret = venus_boot_core(hdev);
530         if (ret) {
531                 dev_err(dev, "failed to reset venus core\n");
532                 return ret;
533         }
534
535         venus_hwversion(hdev);
536
537         return 0;
538 }
539
540 static int venus_halt_axi(struct venus_hfi_device *hdev)
541 {
542         void __iomem *wrapper_base = hdev->core->wrapper_base;
543         void __iomem *vbif_base = hdev->core->vbif_base;
544         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
545         void __iomem *aon_base = hdev->core->aon_base;
546         struct device *dev = hdev->core->dev;
547         u32 val;
548         u32 mask_val;
549         int ret;
550
551         if (IS_V6(hdev->core)) {
552                 writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
553
554                 if (hdev->core->res->num_vpp_pipes == 1)
555                         goto skip_aon_mvp_noc;
556
557                 writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
558                 ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
559                                          val,
560                                          val & BIT(0),
561                                          POLL_INTERVAL_US,
562                                          VBIF_AXI_HALT_ACK_TIMEOUT_US);
563                 if (ret)
564                         return -ETIMEDOUT;
565
566 skip_aon_mvp_noc:
567                 mask_val = (BIT(2) | BIT(1) | BIT(0));
568                 writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
569
570                 writel(0x00, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
571                 ret = readl_poll_timeout(wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6,
572                                          val,
573                                          val == 0,
574                                          POLL_INTERVAL_US,
575                                          VBIF_AXI_HALT_ACK_TIMEOUT_US);
576
577                 if (ret) {
578                         dev_err(dev, "DBLP Release: lpi_status %x\n", val);
579                         return -ETIMEDOUT;
580                 }
581                 return 0;
582         }
583
584         if (IS_V4(hdev->core)) {
585                 val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
586                 val |= WRAPPER_CPU_AXI_HALT_HALT;
587                 writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);
588
589                 ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
590                                          val,
591                                          val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
592                                          POLL_INTERVAL_US,
593                                          VBIF_AXI_HALT_ACK_TIMEOUT_US);
594                 if (ret) {
595                         dev_err(dev, "AXI bus port halt timeout\n");
596                         return ret;
597                 }
598
599                 return 0;
600         }
601
602         /* Halt AXI and AXI IMEM VBIF Access */
603         val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
604         val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
605         writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);
606
607         /* Request for AXI bus port halt */
608         ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
609                                  val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
610                                  POLL_INTERVAL_US,
611                                  VBIF_AXI_HALT_ACK_TIMEOUT_US);
612         if (ret) {
613                 dev_err(dev, "AXI bus port halt timeout\n");
614                 return ret;
615         }
616
617         return 0;
618 }
619
620 static int venus_power_off(struct venus_hfi_device *hdev)
621 {
622         int ret;
623
624         if (!hdev->power_enabled)
625                 return 0;
626
627         ret = venus_set_hw_state_suspend(hdev->core);
628         if (ret)
629                 return ret;
630
631         ret = venus_halt_axi(hdev);
632         if (ret)
633                 return ret;
634
635         hdev->power_enabled = false;
636
637         return 0;
638 }
639
640 static int venus_power_on(struct venus_hfi_device *hdev)
641 {
642         int ret;
643
644         if (hdev->power_enabled)
645                 return 0;
646
647         ret = venus_set_hw_state_resume(hdev->core);
648         if (ret)
649                 goto err;
650
651         ret = venus_run(hdev);
652         if (ret)
653                 goto err_suspend;
654
655         hdev->power_enabled = true;
656
657         return 0;
658
659 err_suspend:
660         venus_set_hw_state_suspend(hdev->core);
661 err:
662         hdev->power_enabled = false;
663         return ret;
664 }
665
666 static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
667                                         void *pkt)
668 {
669         struct iface_queue *queue;
670         u32 tx_req;
671         int ret;
672
673         if (!venus_is_valid_state(hdev))
674                 return -EINVAL;
675
676         queue = &hdev->queues[IFACEQ_MSG_IDX];
677
678         ret = venus_read_queue(hdev, queue, pkt, &tx_req);
679         if (ret)
680                 return ret;
681
682         if (tx_req)
683                 venus_soft_int(hdev);
684
685         return 0;
686 }
687
688 static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
689 {
690         int ret;
691
692         mutex_lock(&hdev->lock);
693         ret = venus_iface_msgq_read_nolock(hdev, pkt);
694         mutex_unlock(&hdev->lock);
695
696         return ret;
697 }
698
699 static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
700                                         void *pkt)
701 {
702         struct iface_queue *queue;
703         u32 tx_req;
704         int ret;
705
706         ret = venus_is_valid_state(hdev);
707         if (!ret)
708                 return -EINVAL;
709
710         queue = &hdev->queues[IFACEQ_DBG_IDX];
711
712         ret = venus_read_queue(hdev, queue, pkt, &tx_req);
713         if (ret)
714                 return ret;
715
716         if (tx_req)
717                 venus_soft_int(hdev);
718
719         return 0;
720 }
721
722 static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
723 {
724         int ret;
725
726         if (!pkt)
727                 return -EINVAL;
728
729         mutex_lock(&hdev->lock);
730         ret = venus_iface_dbgq_read_nolock(hdev, pkt);
731         mutex_unlock(&hdev->lock);
732
733         return ret;
734 }
735
736 static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
737 {
738         qhdr->status = 1;
739         qhdr->type = IFACEQ_DFLT_QHDR;
740         qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
741         qhdr->pkt_size = 0;
742         qhdr->rx_wm = 1;
743         qhdr->tx_wm = 1;
744         qhdr->rx_req = 1;
745         qhdr->tx_req = 0;
746         qhdr->rx_irq_status = 0;
747         qhdr->tx_irq_status = 0;
748         qhdr->read_idx = 0;
749         qhdr->write_idx = 0;
750 }
751
752 static void venus_interface_queues_release(struct venus_hfi_device *hdev)
753 {
754         mutex_lock(&hdev->lock);
755
756         venus_free(hdev, &hdev->ifaceq_table);
757         venus_free(hdev, &hdev->sfr);
758
759         memset(hdev->queues, 0, sizeof(hdev->queues));
760         memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
761         memset(&hdev->sfr, 0, sizeof(hdev->sfr));
762
763         mutex_unlock(&hdev->lock);
764 }
765
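/*
 * Carve the interface queues out of a single DMA allocation (write-combined,
 * see venus_alloc()): the queue table header sits at offset 0, followed by
 * the three rings (cmd, msg, dbg) at IFACEQ_QUEUE_SIZE strides. Each queue
 * header lives inside the table and records the device address of its ring
 * in start_addr. The SFR buffer (the firmware's failure-reason string, see
 * venus_sfr_print()) is allocated separately and is optional: if that
 * allocation fails the driver simply runs without it.
 */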
766 static int venus_interface_queues_init(struct venus_hfi_device *hdev)
767 {
768         struct hfi_queue_table_header *tbl_hdr;
769         struct iface_queue *queue;
770         struct hfi_sfr *sfr;
771         struct mem_desc desc = {0};
772         unsigned int offset;
773         unsigned int i;
774         int ret;
775
776         ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
777         if (ret)
778                 return ret;
779
780         hdev->ifaceq_table = desc;
781         offset = IFACEQ_TABLE_SIZE;
782
783         for (i = 0; i < IFACEQ_NUM; i++) {
784                 queue = &hdev->queues[i];
785                 queue->qmem.da = desc.da + offset;
786                 queue->qmem.kva = desc.kva + offset;
787                 queue->qmem.size = IFACEQ_QUEUE_SIZE;
788                 offset += queue->qmem.size;
789                 queue->qhdr =
790                         IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
791
792                 venus_set_qhdr_defaults(queue->qhdr);
793
794                 queue->qhdr->start_addr = queue->qmem.da;
795
796                 if (i == IFACEQ_CMD_IDX)
797                         queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
798                 else if (i == IFACEQ_MSG_IDX)
799                         queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
800                 else if (i == IFACEQ_DBG_IDX)
801                         queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
802         }
803
804         tbl_hdr = hdev->ifaceq_table.kva;
805         tbl_hdr->version = 0;
806         tbl_hdr->size = IFACEQ_TABLE_SIZE;
807         tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
808         tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
809         tbl_hdr->num_q = IFACEQ_NUM;
810         tbl_hdr->num_active_q = IFACEQ_NUM;
811
812         /*
813          * Set the receive request to zero on the debug queue as there is no
814          * need for an interrupt from the video hardware for debug messages
815          */
816         queue = &hdev->queues[IFACEQ_DBG_IDX];
817         queue->qhdr->rx_req = 0;
818
819         ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
820         if (ret) {
821                 hdev->sfr.da = 0;
822         } else {
823                 hdev->sfr = desc;
824                 sfr = hdev->sfr.kva;
825                 sfr->buf_size = ALIGNED_SFR_SIZE;
826         }
827
828         /* ensure table and queue header structs are settled in memory */
829         wmb();
830
831         return 0;
832 }
833
834 static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
835 {
836         struct hfi_sys_set_property_pkt *pkt;
837         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
838         int ret;
839
840         pkt = (struct hfi_sys_set_property_pkt *)packet;
841
842         pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
843
844         ret = venus_iface_cmdq_write(hdev, pkt, false);
845         if (ret)
846                 return ret;
847
848         return 0;
849 }
850
851 static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
852 {
853         struct hfi_sys_set_property_pkt *pkt;
854         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
855         int ret;
856
857         pkt = (struct hfi_sys_set_property_pkt *)packet;
858
859         pkt_sys_coverage_config(pkt, mode);
860
861         ret = venus_iface_cmdq_write(hdev, pkt, false);
862         if (ret)
863                 return ret;
864
865         return 0;
866 }
867
868 static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
869                                       bool enable)
870 {
871         struct hfi_sys_set_property_pkt *pkt;
872         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
873         int ret;
874
875         if (!enable)
876                 return 0;
877
878         pkt = (struct hfi_sys_set_property_pkt *)packet;
879
880         pkt_sys_idle_indicator(pkt, enable);
881
882         ret = venus_iface_cmdq_write(hdev, pkt, false);
883         if (ret)
884                 return ret;
885
886         return 0;
887 }
888
889 static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
890                                        bool enable)
891 {
892         struct hfi_sys_set_property_pkt *pkt;
893         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
894         int ret;
895
896         pkt = (struct hfi_sys_set_property_pkt *)packet;
897
898         pkt_sys_power_control(pkt, enable);
899
900         ret = venus_iface_cmdq_write(hdev, pkt, false);
901         if (ret)
902                 return ret;
903
904         return 0;
905 }
906
907 static int venus_get_queue_size(struct venus_hfi_device *hdev,
908                                 unsigned int index)
909 {
910         struct hfi_queue_header *qhdr;
911
912         if (index >= IFACEQ_NUM)
913                 return -EINVAL;
914
915         qhdr = hdev->queues[index].qhdr;
916         if (!qhdr)
917                 return -EINVAL;
918
919         return abs(qhdr->read_idx - qhdr->write_idx);
920 }
921
922 static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
923 {
924         struct device *dev = hdev->core->dev;
925         int ret;
926
927         ret = venus_sys_set_debug(hdev, venus_fw_debug);
928         if (ret)
929                 dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
930
931         /*
932          * The idle indicator is disabled by default on some 4xx firmware
933          * versions; enable it explicitly so that suspend can work by
934          * checking the WFI (wait-for-interrupt) bit.
935          */
936         if (IS_V4(hdev->core) || IS_V6(hdev->core))
937                 venus_sys_idle_indicator = true;
938
939         ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
940         if (ret)
941                 dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
942
943         ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
944         if (ret)
945                 dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
946                          ret);
947
948         return ret;
949 }
950
951 static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync)
952 {
953         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
954         struct hfi_session_pkt pkt;
955
956         pkt_session_cmd(&pkt, pkt_type, inst);
957
958         return venus_iface_cmdq_write(hdev, &pkt, sync);
959 }
960
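/*
 * Drain the debug queue and forward firmware log messages to dev_dbg().
 * Coverage packets (HFI_MSG_SYS_COV) are read but not printed. This runs
 * from the ISR thread after the message queue has been drained, and from a
 * few timeout and error paths, so pending firmware diagnostics end up in
 * the kernel log.
 */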
961 static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
962 {
963         struct device *dev = hdev->core->dev;
964         void *packet = hdev->dbg_buf;
965
966         while (!venus_iface_dbgq_read(hdev, packet)) {
967                 struct hfi_msg_sys_coverage_pkt *pkt = packet;
968
969                 if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
970                         struct hfi_msg_sys_debug_pkt *pkt = packet;
971
972                         dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
973                 }
974         }
975 }
976
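/*
 * Ask the firmware to prepare for power collapse by sending SYS_PC_PREP.
 * When wait is true, block for up to venus_hw_rsp_timeout (default 1000)
 * milliseconds on pwr_collapse_prep, which the ISR thread completes when
 * HFI_MSG_SYS_PC_PREP arrives; on timeout the debug queue is flushed and
 * -ETIMEDOUT is returned.
 */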
977 static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
978                                         bool wait)
979 {
980         unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
981         struct hfi_sys_pc_prep_pkt pkt;
982         int ret;
983
984         init_completion(&hdev->pwr_collapse_prep);
985
986         pkt_sys_pc_prep(&pkt);
987
988         ret = venus_iface_cmdq_write(hdev, &pkt, false);
989         if (ret)
990                 return ret;
991
992         if (!wait)
993                 return 0;
994
995         ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
996         if (!ret) {
997                 venus_flush_debug_queue(hdev);
998                 return -ETIMEDOUT;
999         }
1000
1001         return 0;
1002 }
1003
1004 static int venus_are_queues_empty(struct venus_hfi_device *hdev)
1005 {
1006         int ret1, ret2;
1007
1008         ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
1009         if (ret1 < 0)
1010                 return ret1;
1011
1012         ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
1013         if (ret2 < 0)
1014                 return ret2;
1015
1016         if (!ret1 && !ret2)
1017                 return 1;
1018
1019         return 0;
1020 }
1021
1022 static void venus_sfr_print(struct venus_hfi_device *hdev)
1023 {
1024         struct device *dev = hdev->core->dev;
1025         struct hfi_sfr *sfr = hdev->sfr.kva;
1026         void *p;
1027
1028         if (!sfr)
1029                 return;
1030
1031         p = memchr(sfr->data, '\0', sfr->buf_size);
1032         /*
1033          * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
1034          * that Venus is in the process of crashing.
1035          */
1036         if (!p)
1037                 sfr->data[sfr->buf_size - 1] = '\0';
1038
1039         dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
1040 }
1041
1042 static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
1043                                         void *packet)
1044 {
1045         struct hfi_msg_event_notify_pkt *event_pkt = packet;
1046
1047         if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
1048                 return;
1049
1050         venus_set_state(hdev, VENUS_STATE_DEINIT);
1051
1052         venus_sfr_print(hdev);
1053 }
1054
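/*
 * Threaded half of the interrupt handler (the hard IRQ in venus_isr()
 * below latches WRAPPER_INTR_STATUS and clears the soft interrupt before
 * returning IRQ_WAKE_THREAD). The message queue is drained here, every
 * packet is dispatched through hfi_process_msg_packet(), and the
 * system-level replies this file waits on are handled locally: the
 * SYS_INIT reply programs the vmem resource, SYS_ERROR events trigger a
 * transition to VENUS_STATE_DEINIT, and RELEASE_RESOURCE / PC_PREP
 * complete their respective waiters. Finally the debug queue is flushed.
 */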
1055 static irqreturn_t venus_isr_thread(struct venus_core *core)
1056 {
1057         struct venus_hfi_device *hdev = to_hfi_priv(core);
1058         const struct venus_resources *res;
1059         void *pkt;
1060         u32 msg_ret;
1061
1062         if (!hdev)
1063                 return IRQ_NONE;
1064
1065         res = hdev->core->res;
1066         pkt = hdev->pkt_buf;
1067
1068
1069         while (!venus_iface_msgq_read(hdev, pkt)) {
1070                 msg_ret = hfi_process_msg_packet(core, pkt);
1071                 switch (msg_ret) {
1072                 case HFI_MSG_EVENT_NOTIFY:
1073                         venus_process_msg_sys_error(hdev, pkt);
1074                         break;
1075                 case HFI_MSG_SYS_INIT:
1076                         venus_hfi_core_set_resource(core, res->vmem_id,
1077                                                     res->vmem_size,
1078                                                     res->vmem_addr,
1079                                                     hdev);
1080                         break;
1081                 case HFI_MSG_SYS_RELEASE_RESOURCE:
1082                         complete(&hdev->release_resource);
1083                         break;
1084                 case HFI_MSG_SYS_PC_PREP:
1085                         complete(&hdev->pwr_collapse_prep);
1086                         break;
1087                 default:
1088                         break;
1089                 }
1090         }
1091
1092         venus_flush_debug_queue(hdev);
1093
1094         return IRQ_HANDLED;
1095 }
1096
1097 static irqreturn_t venus_isr(struct venus_core *core)
1098 {
1099         struct venus_hfi_device *hdev = to_hfi_priv(core);
1100         u32 status;
1101         void __iomem *cpu_cs_base;
1102         void __iomem *wrapper_base;
1103
1104         if (!hdev)
1105                 return IRQ_NONE;
1106
1107         cpu_cs_base = hdev->core->cpu_cs_base;
1108         wrapper_base = hdev->core->wrapper_base;
1109
1110         status = readl(wrapper_base + WRAPPER_INTR_STATUS);
1111         if (IS_V6(core)) {
1112                 if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1113                     status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
1114                     status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1115                         hdev->irq_status = status;
1116         } else {
1117                 if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1118                     status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
1119                     status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1120                         hdev->irq_status = status;
1121         }
1122         writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
1123         if (!IS_V6(core))
1124                 writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
1125
1126         return IRQ_WAKE_THREAD;
1127 }
1128
1129 static int venus_core_init(struct venus_core *core)
1130 {
1131         struct venus_hfi_device *hdev = to_hfi_priv(core);
1132         struct device *dev = core->dev;
1133         struct hfi_sys_get_property_pkt version_pkt;
1134         struct hfi_sys_init_pkt pkt;
1135         int ret;
1136
1137         pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
1138
1139         venus_set_state(hdev, VENUS_STATE_INIT);
1140
1141         ret = venus_iface_cmdq_write(hdev, &pkt, false);
1142         if (ret)
1143                 return ret;
1144
1145         pkt_sys_image_version(&version_pkt);
1146
1147         ret = venus_iface_cmdq_write(hdev, &version_pkt, false);
1148         if (ret)
1149                 dev_warn(dev, "failed to send image version pkt to fw\n");
1150
1151         ret = venus_sys_set_default_properties(hdev);
1152         if (ret)
1153                 return ret;
1154
1155         return 0;
1156 }
1157
1158 static int venus_core_deinit(struct venus_core *core)
1159 {
1160         struct venus_hfi_device *hdev = to_hfi_priv(core);
1161
1162         venus_set_state(hdev, VENUS_STATE_DEINIT);
1163         hdev->suspended = true;
1164         hdev->power_enabled = false;
1165
1166         return 0;
1167 }
1168
1169 static int venus_core_ping(struct venus_core *core, u32 cookie)
1170 {
1171         struct venus_hfi_device *hdev = to_hfi_priv(core);
1172         struct hfi_sys_ping_pkt pkt;
1173
1174         pkt_sys_ping(&pkt, cookie);
1175
1176         return venus_iface_cmdq_write(hdev, &pkt, false);
1177 }
1178
1179 static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
1180 {
1181         struct venus_hfi_device *hdev = to_hfi_priv(core);
1182         struct hfi_sys_test_ssr_pkt pkt;
1183         int ret;
1184
1185         ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
1186         if (ret)
1187                 return ret;
1188
1189         return venus_iface_cmdq_write(hdev, &pkt, false);
1190 }
1191
1192 static int venus_session_init(struct venus_inst *inst, u32 session_type,
1193                               u32 codec)
1194 {
1195         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1196         struct hfi_session_init_pkt pkt;
1197         int ret;
1198
1199         ret = venus_sys_set_debug(hdev, venus_fw_debug);
1200         if (ret)
1201                 goto err;
1202
1203         ret = pkt_session_init(&pkt, inst, session_type, codec);
1204         if (ret)
1205                 goto err;
1206
1207         ret = venus_iface_cmdq_write(hdev, &pkt, true);
1208         if (ret)
1209                 goto err;
1210
1211         return 0;
1212
1213 err:
1214         venus_flush_debug_queue(hdev);
1215         return ret;
1216 }
1217
1218 static int venus_session_end(struct venus_inst *inst)
1219 {
1220         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1221         struct device *dev = hdev->core->dev;
1222
1223         if (venus_fw_coverage) {
1224                 if (venus_sys_set_coverage(hdev, venus_fw_coverage))
1225                         dev_warn(dev, "fw coverage msg ON failed\n");
1226         }
1227
1228         return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true);
1229 }
1230
1231 static int venus_session_abort(struct venus_inst *inst)
1232 {
1233         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1234
1235         venus_flush_debug_queue(hdev);
1236
1237         return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true);
1238 }
1239
1240 static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
1241 {
1242         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1243         struct hfi_session_flush_pkt pkt;
1244         int ret;
1245
1246         ret = pkt_session_flush(&pkt, inst, flush_mode);
1247         if (ret)
1248                 return ret;
1249
1250         return venus_iface_cmdq_write(hdev, &pkt, true);
1251 }
1252
1253 static int venus_session_start(struct venus_inst *inst)
1254 {
1255         return venus_session_cmd(inst, HFI_CMD_SESSION_START, true);
1256 }
1257
1258 static int venus_session_stop(struct venus_inst *inst)
1259 {
1260         return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true);
1261 }
1262
1263 static int venus_session_continue(struct venus_inst *inst)
1264 {
1265         return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false);
1266 }
1267
1268 static int venus_session_etb(struct venus_inst *inst,
1269                              struct hfi_frame_data *in_frame)
1270 {
1271         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1272         u32 session_type = inst->session_type;
1273         int ret;
1274
1275         if (session_type == VIDC_SESSION_TYPE_DEC) {
1276                 struct hfi_session_empty_buffer_compressed_pkt pkt;
1277
1278                 ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
1279                 if (ret)
1280                         return ret;
1281
1282                 ret = venus_iface_cmdq_write(hdev, &pkt, false);
1283         } else if (session_type == VIDC_SESSION_TYPE_ENC) {
1284                 struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
1285
1286                 ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
1287                 if (ret)
1288                         return ret;
1289
1290                 ret = venus_iface_cmdq_write(hdev, &pkt, false);
1291         } else {
1292                 ret = -EINVAL;
1293         }
1294
1295         return ret;
1296 }
1297
1298 static int venus_session_ftb(struct venus_inst *inst,
1299                              struct hfi_frame_data *out_frame)
1300 {
1301         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1302         struct hfi_session_fill_buffer_pkt pkt;
1303         int ret;
1304
1305         ret = pkt_session_ftb(&pkt, inst, out_frame);
1306         if (ret)
1307                 return ret;
1308
1309         return venus_iface_cmdq_write(hdev, &pkt, false);
1310 }
1311
1312 static int venus_session_set_buffers(struct venus_inst *inst,
1313                                      struct hfi_buffer_desc *bd)
1314 {
1315         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1316         struct hfi_session_set_buffers_pkt *pkt;
1317         u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1318         int ret;
1319
1320         if (bd->buffer_type == HFI_BUFFER_INPUT)
1321                 return 0;
1322
1323         pkt = (struct hfi_session_set_buffers_pkt *)packet;
1324
1325         ret = pkt_session_set_buffers(pkt, inst, bd);
1326         if (ret)
1327                 return ret;
1328
1329         return venus_iface_cmdq_write(hdev, pkt, false);
1330 }
1331
1332 static int venus_session_unset_buffers(struct venus_inst *inst,
1333                                        struct hfi_buffer_desc *bd)
1334 {
1335         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1336         struct hfi_session_release_buffer_pkt *pkt;
1337         u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1338         int ret;
1339
1340         if (bd->buffer_type == HFI_BUFFER_INPUT)
1341                 return 0;
1342
1343         pkt = (struct hfi_session_release_buffer_pkt *)packet;
1344
1345         ret = pkt_session_unset_buffers(pkt, inst, bd);
1346         if (ret)
1347                 return ret;
1348
1349         return venus_iface_cmdq_write(hdev, pkt, true);
1350 }
1351
1352 static int venus_session_load_res(struct venus_inst *inst)
1353 {
1354         return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true);
1355 }
1356
1357 static int venus_session_release_res(struct venus_inst *inst)
1358 {
1359         return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true);
1360 }
1361
1362 static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1363                                        u32 seq_hdr_len)
1364 {
1365         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1366         struct hfi_session_parse_sequence_header_pkt *pkt;
1367         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1368         int ret;
1369
1370         pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
1371
1372         ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
1373         if (ret)
1374                 return ret;
1375
1376         ret = venus_iface_cmdq_write(hdev, pkt, false);
1377         if (ret)
1378                 return ret;
1379
1380         return 0;
1381 }
1382
1383 static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1384                                      u32 seq_hdr_len)
1385 {
1386         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1387         struct hfi_session_get_sequence_header_pkt *pkt;
1388         u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1389         int ret;
1390
1391         pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
1392
1393         ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
1394         if (ret)
1395                 return ret;
1396
1397         return venus_iface_cmdq_write(hdev, pkt, false);
1398 }
1399
1400 static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
1401                                       void *pdata)
1402 {
1403         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1404         struct hfi_session_set_property_pkt *pkt;
1405         u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1406         int ret;
1407
1408         pkt = (struct hfi_session_set_property_pkt *)packet;
1409
1410         ret = pkt_session_set_property(pkt, inst, ptype, pdata);
1411         if (ret == -ENOTSUPP)
1412                 return 0;
1413         if (ret)
1414                 return ret;
1415
1416         return venus_iface_cmdq_write(hdev, pkt, false);
1417 }
1418
1419 static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
1420 {
1421         struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1422         struct hfi_session_get_property_pkt pkt;
1423         int ret;
1424
1425         ret = pkt_session_get_property(&pkt, inst, ptype);
1426         if (ret)
1427                 return ret;
1428
1429         return venus_iface_cmdq_write(hdev, &pkt, true);
1430 }
1431
1432 static int venus_resume(struct venus_core *core)
1433 {
1434         struct venus_hfi_device *hdev = to_hfi_priv(core);
1435         int ret = 0;
1436
1437         mutex_lock(&hdev->lock);
1438
1439         if (!hdev->suspended)
1440                 goto unlock;
1441
1442         ret = venus_power_on(hdev);
1443
1444 unlock:
1445         if (!ret)
1446                 hdev->suspended = false;
1447
1448         mutex_unlock(&hdev->lock);
1449
1450         return ret;
1451 }
1452
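/*
 * Suspend path for the older cores (everything venus_suspend() does not
 * route to venus_suspend_3xx()): send PC_PREP and wait for the reply, then
 * only power off if the last command sent really was PC_PREP, both the
 * command and message queues are empty, and the firmware has set PC_READY
 * in the control status register. Any of those checks failing aborts the
 * suspend with -EINVAL rather than risking a power collapse with work
 * still in flight.
 */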
1453 static int venus_suspend_1xx(struct venus_core *core)
1454 {
1455         struct venus_hfi_device *hdev = to_hfi_priv(core);
1456         struct device *dev = core->dev;
1457         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1458         u32 ctrl_status;
1459         int ret;
1460
1461         if (!hdev->power_enabled || hdev->suspended)
1462                 return 0;
1463
1464         mutex_lock(&hdev->lock);
1465         ret = venus_is_valid_state(hdev);
1466         mutex_unlock(&hdev->lock);
1467
1468         if (!ret) {
1469                 dev_err(dev, "bad state, cannot suspend\n");
1470                 return -EINVAL;
1471         }
1472
1473         ret = venus_prepare_power_collapse(hdev, true);
1474         if (ret) {
1475                 dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1476                 return ret;
1477         }
1478
1479         mutex_lock(&hdev->lock);
1480
1481         if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
1482                 mutex_unlock(&hdev->lock);
1483                 return -EINVAL;
1484         }
1485
1486         ret = venus_are_queues_empty(hdev);
1487         if (ret < 0 || !ret) {
1488                 mutex_unlock(&hdev->lock);
1489                 return -EINVAL;
1490         }
1491
1492         ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1493         if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
1494                 mutex_unlock(&hdev->lock);
1495                 return -EINVAL;
1496         }
1497
1498         ret = venus_power_off(hdev);
1499         if (ret) {
1500                 mutex_unlock(&hdev->lock);
1501                 return ret;
1502         }
1503
1504         hdev->suspended = true;
1505
1506         mutex_unlock(&hdev->lock);
1507
1508         return 0;
1509 }
1510
1511 static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
1512 {
1513         void __iomem *wrapper_base = hdev->core->wrapper_base;
1514         void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1515         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1516         u32 ctrl_status, cpu_status;
1517
1518         if (IS_V6(hdev->core))
1519                 cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1520         else
1521                 cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1522         ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1523
1524         if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1525             ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1526                 return true;
1527
1528         return false;
1529 }
1530
1531 static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
1532 {
1533         void __iomem *wrapper_base = hdev->core->wrapper_base;
1534         void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1535         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1536         u32 ctrl_status, cpu_status;
1537
1538         if (IS_V6(hdev->core))
1539                 cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1540         else
1541                 cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1542         ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1543
1544         if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1545             ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1546                 return true;
1547
1548         return false;
1549 }
1550
1551 static int venus_suspend_3xx(struct venus_core *core)
1552 {
1553         struct venus_hfi_device *hdev = to_hfi_priv(core);
1554         struct device *dev = core->dev;
1555         void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1556         u32 ctrl_status;
1557         bool val;
1558         int ret;
1559
1560         if (!hdev->power_enabled || hdev->suspended)
1561                 return 0;
1562
1563         mutex_lock(&hdev->lock);
1564         ret = venus_is_valid_state(hdev);
1565         mutex_unlock(&hdev->lock);
1566
1567         if (!ret) {
1568                 dev_err(dev, "bad state, cannot suspend\n");
1569                 return -EINVAL;
1570         }
1571
1572         ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1573         if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1574                 goto power_off;
1575
1576         /*
1577          * Power collapse sequence for Venus 3xx and 4xx versions:
1578          * 1. Check that the ARM9 and the video core are idle by checking the
1579          *    WFI bit (bit 0) in the CPU status register and the Idle bit
1580          *    (bit 30) in the control status register of the video core.
1581          * 2. Send a command to prepare for power collapse.
1582          * 3. Check for the WFI and PC_READY bits.
1583          */
1584         ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
1585                                  1500, 100 * 1500);
1586         if (ret) {
1587                 dev_err(dev, "wait for cpu and video core idle fail (%d)\n", ret);
1588                 return ret;
1589         }
1590
1591         ret = venus_prepare_power_collapse(hdev, false);
1592         if (ret) {
1593                 dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1594                 return ret;
1595         }
1596
1597         ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
1598                                  1500, 100 * 1500);
1599         if (ret)
1600                 return ret;
1601
1602 power_off:
1603         mutex_lock(&hdev->lock);
1604
1605         ret = venus_power_off(hdev);
1606         if (ret) {
1607                 dev_err(dev, "venus_power_off (%d)\n", ret);
1608                 mutex_unlock(&hdev->lock);
1609                 return ret;
1610         }
1611
1612         hdev->suspended = true;
1613
1614         mutex_unlock(&hdev->lock);
1615
1616         return 0;
1617 }
1618
1619 static int venus_suspend(struct venus_core *core)
1620 {
1621         if (IS_V3(core) || IS_V4(core) || IS_V6(core))
1622                 return venus_suspend_3xx(core);
1623
1624         return venus_suspend_1xx(core);
1625 }
1626
1627 static const struct hfi_ops venus_hfi_ops = {
1628         .core_init                      = venus_core_init,
1629         .core_deinit                    = venus_core_deinit,
1630         .core_ping                      = venus_core_ping,
1631         .core_trigger_ssr               = venus_core_trigger_ssr,
1632
1633         .session_init                   = venus_session_init,
1634         .session_end                    = venus_session_end,
1635         .session_abort                  = venus_session_abort,
1636         .session_flush                  = venus_session_flush,
1637         .session_start                  = venus_session_start,
1638         .session_stop                   = venus_session_stop,
1639         .session_continue               = venus_session_continue,
1640         .session_etb                    = venus_session_etb,
1641         .session_ftb                    = venus_session_ftb,
1642         .session_set_buffers            = venus_session_set_buffers,
1643         .session_unset_buffers          = venus_session_unset_buffers,
1644         .session_load_res               = venus_session_load_res,
1645         .session_release_res            = venus_session_release_res,
1646         .session_parse_seq_hdr          = venus_session_parse_seq_hdr,
1647         .session_get_seq_hdr            = venus_session_get_seq_hdr,
1648         .session_set_property           = venus_session_set_property,
1649         .session_get_property           = venus_session_get_property,
1650
1651         .resume                         = venus_resume,
1652         .suspend                        = venus_suspend,
1653
1654         .isr                            = venus_isr,
1655         .isr_thread                     = venus_isr_thread,
1656 };
1657
1658 void venus_hfi_destroy(struct venus_core *core)
1659 {
1660         struct venus_hfi_device *hdev = to_hfi_priv(core);
1661
1662         core->priv = NULL;
1663         venus_interface_queues_release(hdev);
1664         mutex_destroy(&hdev->lock);
1665         kfree(hdev);
1666         core->ops = NULL;
1667 }
1668
1669 int venus_hfi_create(struct venus_core *core)
1670 {
1671         struct venus_hfi_device *hdev;
1672         int ret;
1673
1674         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
1675         if (!hdev)
1676                 return -ENOMEM;
1677
1678         mutex_init(&hdev->lock);
1679
1680         hdev->core = core;
1681         hdev->suspended = true;
1682         core->priv = hdev;
1683         core->ops = &venus_hfi_ops;
1684
1685         ret = venus_interface_queues_init(hdev);
1686         if (ret)
1687                 goto err_kfree;
1688
1689         return 0;
1690
1691 err_kfree:
1692         kfree(hdev);
1693         core->priv = NULL;
1694         core->ops = NULL;
1695         return ret;
1696 }
1697
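/*
 * Re-initialize the queue headers and the SFR buffer in place, reusing the
 * memory set up by venus_interface_queues_init(); nothing is freed or
 * reallocated here. This mirrors the init code above so the shared region
 * can be handed back to a (re)started firmware in a pristine state; the
 * caller of this helper lives outside this file.
 */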
1698 void venus_hfi_queues_reinit(struct venus_core *core)
1699 {
1700         struct venus_hfi_device *hdev = to_hfi_priv(core);
1701         struct hfi_queue_table_header *tbl_hdr;
1702         struct iface_queue *queue;
1703         struct hfi_sfr *sfr;
1704         unsigned int i;
1705
1706         mutex_lock(&hdev->lock);
1707
1708         for (i = 0; i < IFACEQ_NUM; i++) {
1709                 queue = &hdev->queues[i];
1710                 queue->qhdr =
1711                         IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
1712
1713                 venus_set_qhdr_defaults(queue->qhdr);
1714
1715                 queue->qhdr->start_addr = queue->qmem.da;
1716
1717                 if (i == IFACEQ_CMD_IDX)
1718                         queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
1719                 else if (i == IFACEQ_MSG_IDX)
1720                         queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
1721                 else if (i == IFACEQ_DBG_IDX)
1722                         queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
1723         }
1724
1725         tbl_hdr = hdev->ifaceq_table.kva;
1726         tbl_hdr->version = 0;
1727         tbl_hdr->size = IFACEQ_TABLE_SIZE;
1728         tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
1729         tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
1730         tbl_hdr->num_q = IFACEQ_NUM;
1731         tbl_hdr->num_active_q = IFACEQ_NUM;
1732
1733         /*
1734          * Set the receive request to zero on the debug queue as there is no
1735          * need for an interrupt from the video hardware for debug messages
1736          */
1737         queue = &hdev->queues[IFACEQ_DBG_IDX];
1738         queue->qhdr->rx_req = 0;
1739
1740         sfr = hdev->sfr.kva;
1741         sfr->buf_size = ALIGNED_SFR_SIZE;
1742
1743         /* ensure table and queue header structs are settled in memory */
1744         wmb();
1745
1746         mutex_unlock(&hdev->lock);
1747 }