// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_rpc.h"
#include "vpu_helpers.h"
19 int vpu_helper_find_in_array_u8(const u8 *array, u32 size, u32 x)
23 for (i = 0; i < size; i++) {
31 bool vpu_helper_check_type(struct vpu_inst *inst, u32 type)
33 const struct vpu_format *pfmt;
35 for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
36 if (!vpu_iface_check_format(inst, pfmt->pixfmt))
38 if (pfmt->type == type)
45 const struct vpu_format *vpu_helper_find_format(struct vpu_inst *inst, u32 type, u32 pixelfmt)
47 const struct vpu_format *pfmt;
49 if (!inst || !inst->formats)
52 if (!vpu_iface_check_format(inst, pixelfmt))
55 for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
56 if (pfmt->pixfmt == pixelfmt && (!type || type == pfmt->type))
63 const struct vpu_format *vpu_helper_find_sibling(struct vpu_inst *inst, u32 type, u32 pixelfmt)
65 const struct vpu_format *fmt;
66 const struct vpu_format *sibling;
68 fmt = vpu_helper_find_format(inst, type, pixelfmt);
69 if (!fmt || !fmt->sibling)
72 sibling = vpu_helper_find_format(inst, type, fmt->sibling);
73 if (!sibling || sibling->sibling != fmt->pixfmt ||
74 sibling->comp_planes != fmt->comp_planes)
80 bool vpu_helper_match_format(struct vpu_inst *inst, u32 type, u32 fmta, u32 fmtb)
82 const struct vpu_format *sibling;
87 sibling = vpu_helper_find_sibling(inst, type, fmta);
88 if (sibling && sibling->pixfmt == fmtb)
93 const struct vpu_format *vpu_helper_enum_format(struct vpu_inst *inst, u32 type, int index)
95 const struct vpu_format *pfmt;
98 if (!inst || !inst->formats)
101 for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
102 if (!vpu_iface_check_format(inst, pfmt->pixfmt))
105 if (pfmt->type == type) {
115 u32 vpu_helper_valid_frame_width(struct vpu_inst *inst, u32 width)
117 const struct vpu_core_resources *res;
122 res = vpu_get_resource(inst);
126 width = clamp(width, res->min_width, res->max_width);
128 width = ALIGN(width, res->step_width);
133 u32 vpu_helper_valid_frame_height(struct vpu_inst *inst, u32 height)
135 const struct vpu_core_resources *res;
140 res = vpu_get_resource(inst);
144 height = clamp(height, res->min_height, res->max_height);
145 if (res->step_height)
146 height = ALIGN(height, res->step_height);
151 static u32 get_nv12_plane_size(u32 width, u32 height, int plane_no,
152 u32 stride, u32 interlaced, u32 *pbl)
157 bytesperline = width;
159 bytesperline = max(bytesperline, *pbl);
160 bytesperline = ALIGN(bytesperline, stride);
161 height = ALIGN(height, 2);
163 size = bytesperline * height;
164 else if (plane_no == 1)
165 size = bytesperline * height >> 1;
172 static u32 get_tiled_8l128_plane_size(u32 fmt, u32 width, u32 height, int plane_no,
173 u32 stride, u32 interlaced, u32 *pbl)
183 if (fmt == V4L2_PIX_FMT_NV12M_10BE_8L128 || fmt == V4L2_PIX_FMT_NV12_10BE_8L128)
185 bytesperline = DIV_ROUND_UP(width * bitdepth, BITS_PER_BYTE);
187 bytesperline = max(bytesperline, *pbl);
188 bytesperline = ALIGN(bytesperline, 1 << ws);
189 bytesperline = ALIGN(bytesperline, stride);
190 height = ALIGN(height, 1 << hs);
192 size = bytesperline * height;
193 else if (plane_no == 1)
194 size = (bytesperline * ALIGN(height, 1 << (hs + 1))) >> 1;
201 static u32 get_default_plane_size(u32 width, u32 height, int plane_no,
202 u32 stride, u32 interlaced, u32 *pbl)
207 bytesperline = width;
209 bytesperline = max(bytesperline, *pbl);
210 bytesperline = ALIGN(bytesperline, stride);
212 size = bytesperline * height;
219 u32 vpu_helper_get_plane_size(u32 fmt, u32 w, u32 h, int plane_no,
220 u32 stride, u32 interlaced, u32 *pbl)
223 case V4L2_PIX_FMT_NV12:
224 case V4L2_PIX_FMT_NV12M:
225 return get_nv12_plane_size(w, h, plane_no, stride, interlaced, pbl);
226 case V4L2_PIX_FMT_NV12_8L128:
227 case V4L2_PIX_FMT_NV12M_8L128:
228 case V4L2_PIX_FMT_NV12_10BE_8L128:
229 case V4L2_PIX_FMT_NV12M_10BE_8L128:
230 return get_tiled_8l128_plane_size(fmt, w, h, plane_no, stride, interlaced, pbl);
232 return get_default_plane_size(w, h, plane_no, stride, interlaced, pbl);
236 int vpu_helper_copy_from_stream_buffer(struct vpu_buffer *stream_buffer,
237 u32 *rptr, u32 size, void *dst)
244 if (!stream_buffer || !rptr || !dst)
251 start = stream_buffer->phys;
252 end = start + stream_buffer->length;
253 virt = stream_buffer->virt;
255 if (offset < start || offset > end)
258 if (offset + size <= end) {
259 memcpy(dst, virt + (offset - start), size);
261 memcpy(dst, virt + (offset - start), end - offset);
262 memcpy(dst + end - offset, virt, size + offset - end);
265 *rptr = vpu_helper_step_walk(stream_buffer, offset, size);
270 int vpu_helper_copy_to_stream_buffer(struct vpu_buffer *stream_buffer,
271 u32 *wptr, u32 size, void *src)
278 if (!stream_buffer || !wptr || !src)
285 start = stream_buffer->phys;
286 end = start + stream_buffer->length;
287 virt = stream_buffer->virt;
288 if (offset < start || offset > end)
291 if (offset + size <= end) {
292 memcpy(virt + (offset - start), src, size);
294 memcpy(virt + (offset - start), src, end - offset);
295 memcpy(virt, src + end - offset, size + offset - end);
298 *wptr = vpu_helper_step_walk(stream_buffer, offset, size);
303 int vpu_helper_memset_stream_buffer(struct vpu_buffer *stream_buffer,
304 u32 *wptr, u8 val, u32 size)
311 if (!stream_buffer || !wptr)
318 start = stream_buffer->phys;
319 end = start + stream_buffer->length;
320 virt = stream_buffer->virt;
321 if (offset < start || offset > end)
324 if (offset + size <= end) {
325 memset(virt + (offset - start), val, size);
327 memset(virt + (offset - start), val, end - offset);
328 memset(virt, val, size + offset - end);
333 offset -= stream_buffer->length;
340 u32 vpu_helper_get_free_space(struct vpu_inst *inst)
342 struct vpu_rpc_buffer_desc desc;
344 if (vpu_iface_get_stream_buffer_desc(inst, &desc))
347 if (desc.rptr > desc.wptr)
348 return desc.rptr - desc.wptr;
349 else if (desc.rptr < desc.wptr)
350 return (desc.end - desc.start + desc.rptr - desc.wptr);
352 return desc.end - desc.start;
355 u32 vpu_helper_get_used_space(struct vpu_inst *inst)
357 struct vpu_rpc_buffer_desc desc;
359 if (vpu_iface_get_stream_buffer_desc(inst, &desc))
362 if (desc.wptr > desc.rptr)
363 return desc.wptr - desc.rptr;
364 else if (desc.wptr < desc.rptr)
365 return (desc.end - desc.start + desc.wptr - desc.rptr);
370 int vpu_helper_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
372 struct vpu_inst *inst = ctrl_to_inst(ctrl);
375 case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
376 ctrl->val = inst->min_buffer_cap;
378 case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
379 ctrl->val = inst->min_buffer_out;
388 int vpu_helper_find_startcode(struct vpu_buffer *stream_buffer,
389 u32 pixelformat, u32 offset, u32 bytesused)
397 if (!stream_buffer || !stream_buffer->virt)
400 switch (pixelformat) {
401 case V4L2_PIX_FMT_H264:
403 start_code = 0x00000001;
409 for (i = 0; i < bytesused; i++) {
410 val = (val << 8) | vpu_helper_read_byte(stream_buffer, offset + i);
411 if (i < start_code_size - 1)
413 if (val == start_code) {
414 ret = i + 1 - start_code_size;
422 int vpu_find_dst_by_src(struct vpu_pair *pairs, u32 cnt, u32 src)
429 for (i = 0; i < cnt; i++) {
430 if (pairs[i].src == src)
437 int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst)
444 for (i = 0; i < cnt; i++) {
445 if (pairs[i].dst == dst)
452 const char *vpu_id_name(u32 id)
455 case VPU_CMD_ID_NOOP: return "noop";
456 case VPU_CMD_ID_CONFIGURE_CODEC: return "configure codec";
457 case VPU_CMD_ID_START: return "start";
458 case VPU_CMD_ID_STOP: return "stop";
459 case VPU_CMD_ID_ABORT: return "abort";
460 case VPU_CMD_ID_RST_BUF: return "reset buf";
461 case VPU_CMD_ID_SNAPSHOT: return "snapshot";
462 case VPU_CMD_ID_FIRM_RESET: return "reset firmware";
463 case VPU_CMD_ID_UPDATE_PARAMETER: return "update parameter";
464 case VPU_CMD_ID_FRAME_ENCODE: return "encode frame";
465 case VPU_CMD_ID_SKIP: return "skip";
466 case VPU_CMD_ID_FS_ALLOC: return "alloc fb";
467 case VPU_CMD_ID_FS_RELEASE: return "release fb";
468 case VPU_CMD_ID_TIMESTAMP: return "timestamp";
469 case VPU_CMD_ID_DEBUG: return "debug";
470 case VPU_MSG_ID_RESET_DONE: return "reset done";
471 case VPU_MSG_ID_START_DONE: return "start done";
472 case VPU_MSG_ID_STOP_DONE: return "stop done";
473 case VPU_MSG_ID_ABORT_DONE: return "abort done";
474 case VPU_MSG_ID_BUF_RST: return "buf reset done";
475 case VPU_MSG_ID_MEM_REQUEST: return "mem request";
476 case VPU_MSG_ID_PARAM_UPD_DONE: return "param upd done";
477 case VPU_MSG_ID_FRAME_INPUT_DONE: return "frame input done";
478 case VPU_MSG_ID_ENC_DONE: return "encode done";
479 case VPU_MSG_ID_DEC_DONE: return "frame display";
480 case VPU_MSG_ID_FRAME_REQ: return "fb request";
481 case VPU_MSG_ID_FRAME_RELEASE: return "fb release";
482 case VPU_MSG_ID_SEQ_HDR_FOUND: return "seq hdr found";
483 case VPU_MSG_ID_RES_CHANGE: return "resolution change";
484 case VPU_MSG_ID_PIC_HDR_FOUND: return "pic hdr found";
485 case VPU_MSG_ID_PIC_DECODED: return "picture decoded";
486 case VPU_MSG_ID_PIC_EOS: return "eos";
487 case VPU_MSG_ID_FIFO_LOW: return "fifo low";
488 case VPU_MSG_ID_BS_ERROR: return "bs error";
489 case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
490 case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
491 case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
492 case VPU_MSG_ID_DBG_MSG: return "debug msg";
497 const char *vpu_codec_state_name(enum vpu_codec_state state)
500 case VPU_CODEC_STATE_DEINIT: return "initialization";
501 case VPU_CODEC_STATE_CONFIGURED: return "configured";
502 case VPU_CODEC_STATE_START: return "start";
503 case VPU_CODEC_STATE_STARTED: return "started";
504 case VPU_CODEC_STATE_ACTIVE: return "active";
505 case VPU_CODEC_STATE_SEEK: return "seek";
506 case VPU_CODEC_STATE_STOP: return "stop";
507 case VPU_CODEC_STATE_DRAIN: return "drain";
508 case VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE: return "resolution change";