// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip Video Decoder VP9 backend
 *
 * Copyright (C) 2019 Collabora, Ltd.
 *	Boris Brezillon <boris.brezillon@collabora.com>
 * Copyright (C) 2021 Collabora, Ltd.
 *	Andrzej Pietrasiewicz <andrzej.p@collabora.com>
 *
 * Copyright (C) 2016 Rockchip Electronics Co., Ltd.
 *	Alpha Lin <Alpha.Lin@rock-chips.com>
 */

/*
 * To follow the VP9 spec, start reading this driver
 * code from rkvdec_vp9_run(), followed by rkvdec_vp9_done().
 */
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-vp9.h>

#include "rkvdec-regs.h"
#define RKVDEC_VP9_PROBE_SIZE		4864
#define RKVDEC_VP9_COUNT_SIZE		13232
#define RKVDEC_VP9_MAX_SEGMAP_SIZE	73728
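
/*
 * Note on the sizes above (descriptive, inferred from how the buffers are
 * used later in this file, not quoted from the TRM): RKVDEC_VP9_PROBE_SIZE
 * appears to size the packed probability area handed to the CABAC engine,
 * RKVDEC_VP9_COUNT_SIZE the symbol-count buffer written back by the
 * hardware, and RKVDEC_VP9_MAX_SEGMAP_SIZE one copy of the per-block
 * segmentation map (two copies are kept so that the current and last
 * frame maps can be ping-ponged).
 */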
struct rkvdec_vp9_intra_mode_probs {

struct rkvdec_vp9_intra_only_frame_probs {
	u8 coef_intra[4][2][128];
	struct rkvdec_vp9_intra_mode_probs intra_mode[10];

struct rkvdec_vp9_inter_frame_probs {
	u8 interp_filter[4][2];
	u8 coef[2][4][2][128];
	u8 class0_fr[2][2][3];

struct rkvdec_vp9_probs {
	/* 128 bit alignment */
	struct rkvdec_vp9_inter_frame_probs inter;
	struct rkvdec_vp9_intra_only_frame_probs intra_only;

/* Data structure describing auxiliary buffer format. */
struct rkvdec_vp9_priv_tbl {
	struct rkvdec_vp9_probs probs;
	u8 segmap[2][RKVDEC_VP9_MAX_SEGMAP_SIZE];

struct rkvdec_vp9_refs_counts {

struct rkvdec_vp9_inter_frame_symbol_counts {
	u32 partition[16][4];
	u32 single_ref[5][2][2];
	/* add 1 element for alignment */
	u32 classes[2][11 + 1];
	u32 class0_fp[2][2][4];
	struct rkvdec_vp9_refs_counts ref_cnt[2][4][2][6][6];

struct rkvdec_vp9_intra_frame_symbol_counts {
	u32 partition[4][4][4];
	struct rkvdec_vp9_refs_counts ref_cnt[2][4][2][6][6];
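
/*
 * Note (driver behaviour inferred from rkvdec_init_v4l2_vp9_count_tbl()
 * below): the hardware writes its symbol counts into a single buffer, and
 * the intra and inter layouts above are just two views of that same
 * memory. Which view is valid for a given frame depends on whether the
 * frame was intra-only; see how the counts are picked in rkvdec_vp9_done().
 */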
struct rkvdec_vp9_run {
	struct rkvdec_run base;
	const struct v4l2_ctrl_vp9_frame *decode_params;

struct rkvdec_vp9_frame_info {
	u32 frame_context_idx : 2;
	u32 reference_mode : 2;
	u32 interpolation_filter : 3;
	struct v4l2_vp9_segmentation seg;
	struct v4l2_vp9_loop_filter lf;

struct rkvdec_vp9_ctx {
	struct rkvdec_aux_buf priv_tbl;
	struct rkvdec_aux_buf count_tbl;
	struct v4l2_vp9_frame_symbol_counts inter_cnts;
	struct v4l2_vp9_frame_symbol_counts intra_cnts;
	struct v4l2_vp9_frame_context probability_tables;
	struct v4l2_vp9_frame_context frame_context[4];
	struct rkvdec_vp9_frame_info cur;
	struct rkvdec_vp9_frame_info last;
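
/*
 * Packing sketch for the coefficient probability planes (the exact pad
 * count is an assumption based on the byte_count == 27 check below and on
 * the [...][128] plane size): each plane holds 6 * 6 * 3 = 108 probs, and
 * after every 27 probs the write position is bumped so that each group of
 * 27 occupies 32 bytes (27 probs + 5 bytes of padding). Four such groups
 * give the 128-byte plane expected by the hardware.
 */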
static void write_coeff_plane(const u8 coef[6][6][3], u8 *coeff_plane)
	unsigned int idx = 0, byte_count = 0;

	for (k = 0; k < 6; k++) {
		for (m = 0; m < 6; m++) {
			for (n = 0; n < 3; n++) {
				coeff_plane[idx++] = p;
				if (byte_count == 27) {
static void init_intra_only_probs(struct rkvdec_ctx *ctx,
				  const struct rkvdec_vp9_run *run)
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
	struct rkvdec_vp9_intra_only_frame_probs *rkprobs;
	const struct v4l2_vp9_frame_context *probs;
	unsigned int i, j, k;

	rkprobs = &tbl->probs.intra_only;
	probs = &vp9_ctx->probability_tables;
	 * intra only 149 x 128 bits, aligned to 152 x 128 bits; coeff related
	for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
		for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
			write_coeff_plane(probs->coef[i][j][0],
					  rkprobs->coef_intra[i][j]);

	/* intra mode prob 80 x 128 bits */
	for (i = 0; i < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob); i++) {
		unsigned int byte_count = 0;

		/* vp9_kf_y_mode_prob */
		for (j = 0; j < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob[0]); j++) {
			for (k = 0; k < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob[0][0]);
				u8 val = v4l2_vp9_kf_y_mode_prob[i][j][k];

				rkprobs->intra_mode[i].y_mode[idx++] = val;
				if (byte_count == 27) {

	for (i = 0; i < sizeof(v4l2_vp9_kf_uv_mode_prob); ++i) {
		const u8 *ptr = (const u8 *)v4l2_vp9_kf_uv_mode_prob;

		rkprobs->intra_mode[i / 23].uv_mode[i % 23] = ptr[i];
static void init_inter_probs(struct rkvdec_ctx *ctx,
			     const struct rkvdec_vp9_run *run)
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
	struct rkvdec_vp9_inter_frame_probs *rkprobs;
	const struct v4l2_vp9_frame_context *probs;
	unsigned int i, j, k;

	rkprobs = &tbl->probs.inter;
	probs = &vp9_ctx->probability_tables;

	/*
	 * 151 x 128 bits, aligned to 152 x 128 bits
	 * intra_y_mode & inter_block info 6 x 128 bits
	 */
	memcpy(rkprobs->y_mode, probs->y_mode, sizeof(rkprobs->y_mode));
	memcpy(rkprobs->comp_mode, probs->comp_mode,
	       sizeof(rkprobs->comp_mode));
	memcpy(rkprobs->comp_ref, probs->comp_ref,
	       sizeof(rkprobs->comp_ref));
	memcpy(rkprobs->single_ref, probs->single_ref,
	       sizeof(rkprobs->single_ref));
	memcpy(rkprobs->inter_mode, probs->inter_mode,
	       sizeof(rkprobs->inter_mode));
	memcpy(rkprobs->interp_filter, probs->interp_filter,
	       sizeof(rkprobs->interp_filter));

	/* 128 x 128 bits coeff related */
	for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
		for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) {
			for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
				write_coeff_plane(probs->coef[i][j][k],
						  rkprobs->coef[k][i][j]);

	/* intra uv mode 6 x 128 */
	memcpy(rkprobs->uv_mode_0_2, &probs->uv_mode[0],
	       sizeof(rkprobs->uv_mode_0_2));
	memcpy(rkprobs->uv_mode_3_5, &probs->uv_mode[3],
	       sizeof(rkprobs->uv_mode_3_5));
	memcpy(rkprobs->uv_mode_6_8, &probs->uv_mode[6],
	       sizeof(rkprobs->uv_mode_6_8));
	memcpy(rkprobs->uv_mode_9, &probs->uv_mode[9],
	       sizeof(rkprobs->uv_mode_9));

	/* mv related 6 x 128 */
	memcpy(rkprobs->mv.joint, probs->mv.joint,
	       sizeof(rkprobs->mv.joint));
	memcpy(rkprobs->mv.sign, probs->mv.sign,
	       sizeof(rkprobs->mv.sign));
	memcpy(rkprobs->mv.classes, probs->mv.classes,
	       sizeof(rkprobs->mv.classes));
	memcpy(rkprobs->mv.class0_bit, probs->mv.class0_bit,
	       sizeof(rkprobs->mv.class0_bit));
	memcpy(rkprobs->mv.bits, probs->mv.bits,
	       sizeof(rkprobs->mv.bits));
	memcpy(rkprobs->mv.class0_fr, probs->mv.class0_fr,
	       sizeof(rkprobs->mv.class0_fr));
	memcpy(rkprobs->mv.fr, probs->mv.fr,
	       sizeof(rkprobs->mv.fr));
	memcpy(rkprobs->mv.class0_hp, probs->mv.class0_hp,
	       sizeof(rkprobs->mv.class0_hp));
	memcpy(rkprobs->mv.hp, probs->mv.hp,
	       sizeof(rkprobs->mv.hp));
static void init_probs(struct rkvdec_ctx *ctx,
		       const struct rkvdec_vp9_run *run)
	const struct v4l2_ctrl_vp9_frame *dec_params;
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
	struct rkvdec_vp9_probs *rkprobs = &tbl->probs;
	const struct v4l2_vp9_segmentation *seg;
	const struct v4l2_vp9_frame_context *probs;

	dec_params = run->decode_params;
	probs = &vp9_ctx->probability_tables;
	seg = &dec_params->seg;

	memset(rkprobs, 0, sizeof(*rkprobs));

	intra_only = !!(dec_params->flags &
			(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
			 V4L2_VP9_FRAME_FLAG_INTRA_ONLY));

	/* sb info 5 x 128 bit */
	memcpy(rkprobs->partition,
	       intra_only ? v4l2_vp9_kf_partition_probs : probs->partition,
	       sizeof(rkprobs->partition));

	memcpy(rkprobs->pred, seg->pred_probs, sizeof(rkprobs->pred));
	memcpy(rkprobs->tree, seg->tree_probs, sizeof(rkprobs->tree));
	memcpy(rkprobs->skip, probs->skip, sizeof(rkprobs->skip));
	memcpy(rkprobs->tx32, probs->tx32, sizeof(rkprobs->tx32));
	memcpy(rkprobs->tx16, probs->tx16, sizeof(rkprobs->tx16));
	memcpy(rkprobs->tx8, probs->tx8, sizeof(rkprobs->tx8));
	memcpy(rkprobs->is_inter, probs->is_inter, sizeof(rkprobs->is_inter));

		init_intra_only_probs(ctx, run);

		init_inter_probs(ctx, run);
struct rkvdec_vp9_ref_reg {

static struct rkvdec_vp9_ref_reg ref_regs[] = {
		.reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(0),
		.reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(0),
		.reg_y_stride = RKVDEC_VP9_LAST_FRAME_YSTRIDE,
		.reg_yuv_stride = RKVDEC_VP9_LAST_FRAME_YUVSTRIDE,
		.reg_ref_base = RKVDEC_REG_VP9_LAST_FRAME_BASE,

		.reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(1),
		.reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(1),
		.reg_y_stride = RKVDEC_VP9_GOLDEN_FRAME_YSTRIDE,
		.reg_ref_base = RKVDEC_REG_VP9_GOLDEN_FRAME_BASE,

		.reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(2),
		.reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(2),
		.reg_y_stride = RKVDEC_VP9_ALTREF_FRAME_YSTRIDE,
		.reg_ref_base = RKVDEC_REG_VP9_ALTREF_FRAME_BASE,
static struct rkvdec_decoded_buffer *
get_ref_buf(struct rkvdec_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
	struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
	/*
	 * If a reference is unused or invalid, the address of the current
	 * destination buffer is returned.
	 */
	buf_idx = vb2_find_timestamp(cap_q, timestamp, 0);
	if (buf_idx < 0)
		return vb2_to_rkvdec_decoded_buf(&dst->vb2_buf);

	return vb2_to_rkvdec_decoded_buf(vb2_get_buffer(cap_q, buf_idx));
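
/*
 * The motion-vector buffer sits right after the decoded YUV data. A quick
 * worked example of the arithmetic below (illustrative only, the frame
 * size is made up): for an 8-bit 1920x1080 frame, aligned_height =
 * round_up(1080, 64) = 1088 and aligned_pitch = round_up(1920 * 8, 512) / 8
 * = 1920 bytes, so yuv_len = 1920 * 1088 * 3 / 2 = 3133440 bytes, and the
 * MV data starts at that offset from the plane base address.
 */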
static dma_addr_t get_mv_base_addr(struct rkvdec_decoded_buffer *buf)
	unsigned int aligned_pitch, aligned_height, yuv_len;

	aligned_height = round_up(buf->vp9.height, 64);
	aligned_pitch = round_up(buf->vp9.width * buf->vp9.bit_depth, 512) / 8;
	yuv_len = (aligned_height * aligned_pitch * 3) / 2;

	return vb2_dma_contig_plane_dma_addr(&buf->base.vb.vb2_buf, 0) +
static void config_ref_registers(struct rkvdec_ctx *ctx,
				 const struct rkvdec_vp9_run *run,
				 struct rkvdec_decoded_buffer *ref_buf,
				 struct rkvdec_vp9_ref_reg *ref_reg)
	unsigned int aligned_pitch, aligned_height, y_len, yuv_len;
	struct rkvdec_dev *rkvdec = ctx->dev;

	aligned_height = round_up(ref_buf->vp9.height, 64);
	writel_relaxed(RKVDEC_VP9_FRAMEWIDTH(ref_buf->vp9.width) |
		       RKVDEC_VP9_FRAMEHEIGHT(ref_buf->vp9.height),
		       rkvdec->regs + ref_reg->reg_frm_size);

	writel_relaxed(vb2_dma_contig_plane_dma_addr(&ref_buf->base.vb.vb2_buf, 0),
		       rkvdec->regs + ref_reg->reg_ref_base);

	if (&ref_buf->base.vb == run->base.bufs.dst)

	aligned_pitch = round_up(ref_buf->vp9.width * ref_buf->vp9.bit_depth, 512) / 8;
	y_len = aligned_height * aligned_pitch;
	yuv_len = (y_len * 3) / 2;

	writel_relaxed(RKVDEC_HOR_Y_VIRSTRIDE(aligned_pitch / 16) |
		       RKVDEC_HOR_UV_VIRSTRIDE(aligned_pitch / 16),
		       rkvdec->regs + ref_reg->reg_hor_stride);
	writel_relaxed(RKVDEC_VP9_REF_YSTRIDE(y_len / 16),
		       rkvdec->regs + ref_reg->reg_y_stride);

	if (!ref_reg->reg_yuv_stride)

	writel_relaxed(RKVDEC_VP9_REF_YUVSTRIDE(yuv_len / 16),
		       rkvdec->regs + ref_reg->reg_yuv_stride);
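
/*
 * Only the LAST reference entry in ref_regs[] declares a reg_yuv_stride,
 * which is why config_ref_registers() skips the YUV-stride write above
 * when that register offset is zero: the GOLDEN and ALTREF entries simply
 * do not carry a YUV-stride register (an observation from the ref_regs[]
 * table, not from the hardware documentation).
 */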
static void config_seg_registers(struct rkvdec_ctx *ctx, unsigned int segid)
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	const struct v4l2_vp9_segmentation *seg;
	struct rkvdec_dev *rkvdec = ctx->dev;

	seg = vp9_ctx->last.valid ? &vp9_ctx->last.seg : &vp9_ctx->cur.seg;
	feature_id = V4L2_VP9_SEG_LVL_ALT_Q;
	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
		feature_val = seg->feature_data[segid][feature_id];
		val |= RKVDEC_SEGID_FRAME_QP_DELTA_EN(1) |
		       RKVDEC_SEGID_FRAME_QP_DELTA(feature_val);

	feature_id = V4L2_VP9_SEG_LVL_ALT_L;
	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
		feature_val = seg->feature_data[segid][feature_id];
		val |= RKVDEC_SEGID_FRAME_LOOPFILTER_VALUE_EN(1) |
		       RKVDEC_SEGID_FRAME_LOOPFILTER_VALUE(feature_val);

	feature_id = V4L2_VP9_SEG_LVL_REF_FRAME;
	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
		feature_val = seg->feature_data[segid][feature_id];
		val |= RKVDEC_SEGID_REFERINFO_EN(1) |
		       RKVDEC_SEGID_REFERINFO(feature_val);

	feature_id = V4L2_VP9_SEG_LVL_SKIP;
	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid))
		val |= RKVDEC_SEGID_FRAME_SKIP_EN(1);

	    (seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE))
		val |= RKVDEC_SEGID_ABS_DELTA(1);

	writel_relaxed(val, rkvdec->regs + RKVDEC_VP9_SEGID_GRP(segid));
static void update_dec_buf_info(struct rkvdec_decoded_buffer *buf,
				const struct v4l2_ctrl_vp9_frame *dec_params)
	buf->vp9.width = dec_params->frame_width_minus_1 + 1;
	buf->vp9.height = dec_params->frame_height_minus_1 + 1;
	buf->vp9.bit_depth = dec_params->bit_depth;

static void update_ctx_cur_info(struct rkvdec_vp9_ctx *vp9_ctx,
				struct rkvdec_decoded_buffer *buf,
				const struct v4l2_ctrl_vp9_frame *dec_params)
	vp9_ctx->cur.valid = true;
	vp9_ctx->cur.reference_mode = dec_params->reference_mode;
	vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
	vp9_ctx->cur.flags = dec_params->flags;
	vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
	vp9_ctx->cur.seg = dec_params->seg;
	vp9_ctx->cur.lf = dec_params->lf;

static void update_ctx_last_info(struct rkvdec_vp9_ctx *vp9_ctx)
	vp9_ctx->last = vp9_ctx->cur;
static void config_registers(struct rkvdec_ctx *ctx,
			     const struct rkvdec_vp9_run *run)
	unsigned int y_len, uv_len, yuv_len, bit_depth, aligned_height, aligned_pitch, stream_len;
	const struct v4l2_ctrl_vp9_frame *dec_params;
	struct rkvdec_decoded_buffer *ref_bufs[3];
	struct rkvdec_decoded_buffer *dst, *last, *mv_ref;
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	u32 val, last_frame_info = 0;
	const struct v4l2_vp9_segmentation *seg;
	struct rkvdec_dev *rkvdec = ctx->dev;

	dec_params = run->decode_params;
	dst = vb2_to_rkvdec_decoded_buf(&run->base.bufs.dst->vb2_buf);
	ref_bufs[0] = get_ref_buf(ctx, &dst->base.vb, dec_params->last_frame_ts);
	ref_bufs[1] = get_ref_buf(ctx, &dst->base.vb, dec_params->golden_frame_ts);
	ref_bufs[2] = get_ref_buf(ctx, &dst->base.vb, dec_params->alt_frame_ts);

	if (vp9_ctx->last.valid)
		last = get_ref_buf(ctx, &dst->base.vb, vp9_ctx->last.timestamp);

	update_dec_buf_info(dst, dec_params);
	update_ctx_cur_info(vp9_ctx, dst, dec_params);
	seg = &dec_params->seg;

	intra_only = !!(dec_params->flags &
			(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
			 V4L2_VP9_FRAME_FLAG_INTRA_ONLY));

	writel_relaxed(RKVDEC_MODE(RKVDEC_MODE_VP9),
		       rkvdec->regs + RKVDEC_REG_SYSCTRL);

	bit_depth = dec_params->bit_depth;
	aligned_height = round_up(ctx->decoded_fmt.fmt.pix_mp.height, 64);

	aligned_pitch = round_up(ctx->decoded_fmt.fmt.pix_mp.width *
	y_len = aligned_height * aligned_pitch;
	yuv_len = y_len + uv_len;

	writel_relaxed(RKVDEC_Y_HOR_VIRSTRIDE(aligned_pitch / 16) |
		       RKVDEC_UV_HOR_VIRSTRIDE(aligned_pitch / 16),
		       rkvdec->regs + RKVDEC_REG_PICPAR);
	writel_relaxed(RKVDEC_Y_VIRSTRIDE(y_len / 16),
		       rkvdec->regs + RKVDEC_REG_Y_VIRSTRIDE);
	writel_relaxed(RKVDEC_YUV_VIRSTRIDE(yuv_len / 16),
		       rkvdec->regs + RKVDEC_REG_YUV_VIRSTRIDE);

	stream_len = vb2_get_plane_payload(&run->base.bufs.src->vb2_buf, 0);
	writel_relaxed(RKVDEC_STRM_LEN(stream_len),
		       rkvdec->regs + RKVDEC_REG_STRM_LEN);
	/*
	 * Reset the count buffer: the decoder only outputs intra-related
	 * syntax counts when decoding an intra frame, but the entropy update
	 * needs to update all the probabilities.
	 */
		memset(vp9_ctx->count_tbl.cpu, 0, vp9_ctx->count_tbl.size);
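
	/*
	 * Segmentation map handling (a summary of the logic below, not a
	 * statement from the TRM): priv_tbl->segmap[] holds two copies of
	 * the per-block segment IDs. cur.segmapid starts from the previous
	 * frame's copy and switches to the other copy whenever this frame is
	 * allowed to rewrite the map, so the SEGIDCUR and SEGIDLAST bases
	 * programmed further down always point at opposite halves.
	 */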
	vp9_ctx->cur.segmapid = vp9_ctx->last.segmapid;
	    !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
	    (!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED) ||
	     (seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP)))
		vp9_ctx->cur.segmapid++;
	for (i = 0; i < ARRAY_SIZE(ref_bufs); i++)
		config_ref_registers(ctx, run, ref_bufs[i], &ref_regs[i]);

	for (i = 0; i < 8; i++)
		config_seg_registers(ctx, i);

	writel_relaxed(RKVDEC_VP9_TX_MODE(vp9_ctx->cur.tx_mode) |
		       RKVDEC_VP9_FRAME_REF_MODE(dec_params->reference_mode),
		       rkvdec->regs + RKVDEC_VP9_CPRHEADER_CONFIG);

		const struct v4l2_vp9_loop_filter *lf;

		if (vp9_ctx->last.valid)
			lf = &vp9_ctx->last.lf;
			lf = &vp9_ctx->cur.lf;

		for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) {
			delta = lf->ref_deltas[i];
			val |= RKVDEC_REF_DELTAS_LASTFRAME(i, delta);

			       rkvdec->regs + RKVDEC_VP9_REF_DELTAS_LASTFRAME);

		for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) {
			delta = lf->mode_deltas[i];
			last_frame_info |= RKVDEC_MODE_DELTAS_LASTFRAME(i,

	if (vp9_ctx->last.valid && !intra_only &&
	    vp9_ctx->last.seg.flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED)
		last_frame_info |= RKVDEC_SEG_EN_LASTFRAME;

	if (vp9_ctx->last.valid &&
	    vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_SHOW_FRAME)
		last_frame_info |= RKVDEC_LAST_SHOW_FRAME;

	if (vp9_ctx->last.valid &&
	    vp9_ctx->last.flags &
	    (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY))
		last_frame_info |= RKVDEC_LAST_INTRA_ONLY;

	if (vp9_ctx->last.valid &&
	    last->vp9.width == dst->vp9.width &&
	    last->vp9.height == dst->vp9.height)
		last_frame_info |= RKVDEC_LAST_WIDHHEIGHT_EQCUR;

	writel_relaxed(last_frame_info,
		       rkvdec->regs + RKVDEC_VP9_INFO_LASTFRAME);
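
	/*
	 * The next write programs the size of the remaining tile data: the
	 * total bitstream payload minus the uncompressed and compressed
	 * frame headers (a description of the subtraction below, not of a
	 * documented register semantic).
	 */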
	writel_relaxed(stream_len - dec_params->compressed_header_size -
		       dec_params->uncompressed_header_size,
		       rkvdec->regs + RKVDEC_VP9_LASTTILE_SIZE);
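
	/*
	 * Reference scaling factors are fixed-point ratios of reference size
	 * to current size with 14 fractional bits: for example, if a
	 * reference and the current frame share the same width, hscale below
	 * evaluates to (refw << 14) / refw = 16384, i.e. a 1.0 scale factor
	 * (the fixed-point interpretation is inferred from the shift, not
	 * quoted from the TRM).
	 */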
	for (i = 0; !intra_only && i < ARRAY_SIZE(ref_bufs); i++) {
		unsigned int refw = ref_bufs[i]->vp9.width;
		unsigned int refh = ref_bufs[i]->vp9.height;

		hscale = (refw << 14) / dst->vp9.width;
		vscale = (refh << 14) / dst->vp9.height;
		writel_relaxed(RKVDEC_VP9_REF_HOR_SCALE(hscale) |
			       RKVDEC_VP9_REF_VER_SCALE(vscale),
			       rkvdec->regs + RKVDEC_VP9_REF_SCALE(i));

	addr = vb2_dma_contig_plane_dma_addr(&dst->base.vb.vb2_buf, 0);
	writel_relaxed(addr, rkvdec->regs + RKVDEC_REG_DECOUT_BASE);
	addr = vb2_dma_contig_plane_dma_addr(&run->base.bufs.src->vb2_buf, 0);
	writel_relaxed(addr, rkvdec->regs + RKVDEC_REG_STRM_RLC_BASE);
	writel_relaxed(vp9_ctx->priv_tbl.dma +
		       offsetof(struct rkvdec_vp9_priv_tbl, probs),
		       rkvdec->regs + RKVDEC_REG_CABACTBL_PROB_BASE);
	writel_relaxed(vp9_ctx->count_tbl.dma,
		       rkvdec->regs + RKVDEC_REG_VP9COUNT_BASE);

	writel_relaxed(vp9_ctx->priv_tbl.dma +
		       offsetof(struct rkvdec_vp9_priv_tbl, segmap) +
		       (RKVDEC_VP9_MAX_SEGMAP_SIZE * vp9_ctx->cur.segmapid),
		       rkvdec->regs + RKVDEC_REG_VP9_SEGIDCUR_BASE);
	writel_relaxed(vp9_ctx->priv_tbl.dma +
		       offsetof(struct rkvdec_vp9_priv_tbl, segmap) +
		       (RKVDEC_VP9_MAX_SEGMAP_SIZE * (!vp9_ctx->cur.segmapid)),
		       rkvdec->regs + RKVDEC_REG_VP9_SEGIDLAST_BASE);

	    !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&

	writel_relaxed(get_mv_base_addr(mv_ref),
		       rkvdec->regs + RKVDEC_VP9_REF_COLMV_BASE);

	writel_relaxed(ctx->decoded_fmt.fmt.pix_mp.width |
		       (ctx->decoded_fmt.fmt.pix_mp.height << 16),
		       rkvdec->regs + RKVDEC_REG_PERFORMANCE_CYCLE);
static int validate_dec_params(struct rkvdec_ctx *ctx,
			       const struct v4l2_ctrl_vp9_frame *dec_params)
	unsigned int aligned_width, aligned_height;

	/* We only support profile 0. */
	if (dec_params->profile != 0) {
		dev_err(ctx->dev->dev, "unsupported profile %d\n",
			dec_params->profile);

	aligned_width = round_up(dec_params->frame_width_minus_1 + 1, 64);
	aligned_height = round_up(dec_params->frame_height_minus_1 + 1, 64);

	/*
	 * Userspace should update the capture/decoded format when the
	 * resolution changes.
	 */
	if (aligned_width != ctx->decoded_fmt.fmt.pix_mp.width ||
	    aligned_height != ctx->decoded_fmt.fmt.pix_mp.height) {
		dev_err(ctx->dev->dev,
			"unexpected bitstream resolution %dx%d\n",
			dec_params->frame_width_minus_1 + 1,
			dec_params->frame_height_minus_1 + 1);
static int rkvdec_vp9_run_preamble(struct rkvdec_ctx *ctx,
				   struct rkvdec_vp9_run *run)
	const struct v4l2_ctrl_vp9_frame *dec_params;
	const struct v4l2_ctrl_vp9_compressed_hdr *prob_updates;
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct v4l2_ctrl *ctrl;
	unsigned int fctx_idx;

	/* v4l2-specific stuff */
	rkvdec_run_preamble(ctx, &run->base);

	ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
			      V4L2_CID_STATELESS_VP9_FRAME);
	dec_params = ctrl->p_cur.p;

	ret = validate_dec_params(ctx, dec_params);

	run->decode_params = dec_params;

	ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, V4L2_CID_STATELESS_VP9_COMPRESSED_HDR);
	prob_updates = ctrl->p_cur.p;
	vp9_ctx->cur.tx_mode = prob_updates->tx_mode;
	/*
	 * By this point userspace has done all parts of 6.2 uncompressed_header()
	 * except this fragment:
	 * if ( FrameIsIntra || error_resilient_mode ) {
	 *	setup_past_independence ( )
	 *	if ( frame_type == KEY_FRAME || error_resilient_mode == 1 ||
	 *	     reset_frame_context == 3 ) {
	 *		for ( i = 0; i < 4; i ++ ) {
	 *	} else if ( reset_frame_context == 2 ) {
	 *		save_probs( frame_context_idx )
	 *	frame_context_idx = 0
	 */
	fctx_idx = v4l2_vp9_reset_frame_ctx(dec_params, vp9_ctx->frame_context);
	vp9_ctx->cur.frame_context_idx = fctx_idx;

	/* 6.1 frame(sz): load_probs() and load_probs2() */
	vp9_ctx->probability_tables = vp9_ctx->frame_context[fctx_idx];
	/*
	 * Userspace has also performed 6.3 compressed_header(), but handled the
	 * probs in a special way. All probs which need updating, except MV-related,
	 * have been read from the bitstream and translated through inv_map_table[],
	 * but no 6.3.6 inv_recenter_nonneg(v, m) has been performed. The values passed
	 * by userspace are either translated values (there are no 0 values in
	 * inv_map_table[]), or zero to indicate no update. All MV-related probs which
	 * need updating have been read from the bitstream and (mv_prob << 1) | 1 has
	 * been applied. The values passed by userspace are either new values
	 * to replace old ones (the above-mentioned shift and bitwise or never result
	 * in a zero) or zero to indicate no update.
	 * fw_update_probs() performs the actual probs updates or leaves probs as-is
	 * for values for which a zero was passed from userspace.
	 */
	v4l2_vp9_fw_update_probs(&vp9_ctx->probability_tables, prob_updates, dec_params);
static int rkvdec_vp9_run(struct rkvdec_ctx *ctx)
	struct rkvdec_dev *rkvdec = ctx->dev;
	struct rkvdec_vp9_run run = { };

	ret = rkvdec_vp9_run_preamble(ctx, &run);
		rkvdec_run_postamble(ctx, &run.base);

	init_probs(ctx, &run);

	/* Configure hardware registers. */
	config_registers(ctx, &run);

	rkvdec_run_postamble(ctx, &run.base);

	schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));

	writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
	writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);

	writel(0xe, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
	/* Start decoding! */
	writel(RKVDEC_INTERRUPT_DEC_E | RKVDEC_CONFIG_DEC_CLK_GATE_E |
	       RKVDEC_TIMEOUT_E | RKVDEC_BUF_EMPTY_E,
	       rkvdec->regs + RKVDEC_REG_INTERRUPT);
#define copy_tx_and_skip(p1, p2)				\
	memcpy((p1)->tx8, (p2)->tx8, sizeof((p1)->tx8));	\
	memcpy((p1)->tx16, (p2)->tx16, sizeof((p1)->tx16));	\
	memcpy((p1)->tx32, (p2)->tx32, sizeof((p1)->tx32));	\
	memcpy((p1)->skip, (p2)->skip, sizeof((p1)->skip));	\
static void rkvdec_vp9_done(struct rkvdec_ctx *ctx,
			    struct vb2_v4l2_buffer *src_buf,
			    struct vb2_v4l2_buffer *dst_buf,
			    enum vb2_buffer_state result)
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	unsigned int fctx_idx;

	/* v4l2-specific stuff */
	if (result == VB2_BUF_STATE_ERROR)
		goto out_update_last;
	/*
	 * 6.1.2 refresh_probs()
	 *
	 * In the spec a complementary condition goes last in 6.1.2 refresh_probs(),
	 * but it makes no sense to perform all the activities from the first "if"
	 * there if we are not actually refreshing the frame context. On top of that,
	 * because of 6.2 uncompressed_header(), whenever error_resilient_mode == 1,
	 * refresh_frame_context == 0. Consequently, if we don't jump to
	 * out_update_last, error_resilient_mode must be 0.
	 */
	if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
		goto out_update_last;
	fctx_idx = vp9_ctx->cur.frame_context_idx;

	if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
		/* error_resilient_mode == 0 && frame_parallel_decoding_mode == 0 */
		struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
		bool frame_is_intra = vp9_ctx->cur.flags &
		    (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
		} _tx_skip, *tx_skip = &_tx_skip;
		struct v4l2_vp9_frame_symbol_counts *counts;

		/* buffer the forward-updated TX and skip probs */
		copy_tx_and_skip(tx_skip, probs);

		/* 6.1.2 refresh_probs(): load_probs() and load_probs2() */
		*probs = vp9_ctx->frame_context[fctx_idx];

		/* if FrameIsIntra then undo the effect of load_probs2() */
			copy_tx_and_skip(probs, tx_skip);

		counts = frame_is_intra ? &vp9_ctx->intra_cnts : &vp9_ctx->inter_cnts;
		v4l2_vp9_adapt_coef_probs(probs, counts,
					  !vp9_ctx->last.valid ||
					  vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME,

		if (!frame_is_intra) {
			const struct rkvdec_vp9_inter_frame_symbol_counts *inter_cnts;

			inter_cnts = vp9_ctx->count_tbl.cpu;
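
			/*
			 * The hardware layout pads each classes[] row to 12
			 * entries (the "11 + 1" in the count struct above),
			 * while the v4l2-vp9 helpers expect rows of 11, so
			 * the counts are repacked into a local array before
			 * being handed over (a restatement of the copy below,
			 * not of any documented requirement).
			 */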
			for (i = 0; i < ARRAY_SIZE(classes); ++i)
				memcpy(classes[i], inter_cnts->classes[i], sizeof(classes[0]));
			counts->classes = &classes;

		/* load_probs2() already done */
		v4l2_vp9_adapt_noncoef_probs(&vp9_ctx->probability_tables, counts,
					     vp9_ctx->cur.reference_mode,
					     vp9_ctx->cur.interpolation_filter,
					     vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
	/* 6.1.2 refresh_probs(): save_probs(fctx_idx) */
	vp9_ctx->frame_context[fctx_idx] = vp9_ctx->probability_tables;

	update_ctx_last_info(vp9_ctx);
static void rkvdec_init_v4l2_vp9_count_tbl(struct rkvdec_ctx *ctx)
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_vp9_intra_frame_symbol_counts *intra_cnts = vp9_ctx->count_tbl.cpu;
	struct rkvdec_vp9_inter_frame_symbol_counts *inter_cnts = vp9_ctx->count_tbl.cpu;

	vp9_ctx->inter_cnts.partition = &inter_cnts->partition;
	vp9_ctx->inter_cnts.skip = &inter_cnts->skip;
	vp9_ctx->inter_cnts.intra_inter = &inter_cnts->inter;
	vp9_ctx->inter_cnts.tx32p = &inter_cnts->tx32p;
	vp9_ctx->inter_cnts.tx16p = &inter_cnts->tx16p;
	vp9_ctx->inter_cnts.tx8p = &inter_cnts->tx8p;

	vp9_ctx->intra_cnts.partition = (u32 (*)[16][4])(&intra_cnts->partition);
	vp9_ctx->intra_cnts.skip = &intra_cnts->skip;
	vp9_ctx->intra_cnts.intra_inter = &intra_cnts->intra;
	vp9_ctx->intra_cnts.tx32p = &intra_cnts->tx32p;
	vp9_ctx->intra_cnts.tx16p = &intra_cnts->tx16p;
	vp9_ctx->intra_cnts.tx8p = &intra_cnts->tx8p;

	vp9_ctx->inter_cnts.y_mode = &inter_cnts->y_mode;
	vp9_ctx->inter_cnts.uv_mode = &inter_cnts->uv_mode;
	vp9_ctx->inter_cnts.comp = &inter_cnts->comp;
	vp9_ctx->inter_cnts.comp_ref = &inter_cnts->comp_ref;
	vp9_ctx->inter_cnts.single_ref = &inter_cnts->single_ref;
	vp9_ctx->inter_cnts.mv_mode = &inter_cnts->mv_mode;
	vp9_ctx->inter_cnts.filter = &inter_cnts->filter;
	vp9_ctx->inter_cnts.mv_joint = &inter_cnts->mv_joint;
	vp9_ctx->inter_cnts.sign = &inter_cnts->sign;
	/*
	 * The Rockchip hardware actually uses "u32 classes[2][11 + 1];"
	 * instead of "u32 classes[2][11];", so this must be explicitly
	 * copied into vp9_ctx->classes when passing the data to the
	 * vp9 library function.
	 */
	vp9_ctx->inter_cnts.class0 = &inter_cnts->class0;
	vp9_ctx->inter_cnts.bits = &inter_cnts->bits;
	vp9_ctx->inter_cnts.class0_fp = &inter_cnts->class0_fp;
	vp9_ctx->inter_cnts.fp = &inter_cnts->fp;
	vp9_ctx->inter_cnts.class0_hp = &inter_cnts->class0_hp;
	vp9_ctx->inter_cnts.hp = &inter_cnts->hp;
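
	/*
	 * The macro below wires every coeff/eob count pointer to the
	 * corresponding hardware ref_cnt entry. Note the index order: the
	 * v4l2-vp9 side uses [i][j][k][l][m] while the hardware struct is
	 * indexed [k][i][j][l][m], i.e. the same five loop variables in a
	 * different order (an observation about the code below, not about
	 * any documented layout).
	 */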
#define INNERMOST_LOOP							\
		for (m = 0; m < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0][0][0]); ++m) {\
			vp9_ctx->inter_cnts.coeff[i][j][k][l][m] =	\
				&inter_cnts->ref_cnt[k][i][j][l][m].coeff;	\
			vp9_ctx->inter_cnts.eob[i][j][k][l][m][0] =	\
				&inter_cnts->ref_cnt[k][i][j][l][m].eob[0];	\
			vp9_ctx->inter_cnts.eob[i][j][k][l][m][1] =	\
				&inter_cnts->ref_cnt[k][i][j][l][m].eob[1];	\
			vp9_ctx->intra_cnts.coeff[i][j][k][l][m] =	\
				&intra_cnts->ref_cnt[k][i][j][l][m].coeff;	\
			vp9_ctx->intra_cnts.eob[i][j][k][l][m][0] =	\
				&intra_cnts->ref_cnt[k][i][j][l][m].eob[0];	\
			vp9_ctx->intra_cnts.eob[i][j][k][l][m][1] =	\
				&intra_cnts->ref_cnt[k][i][j][l][m].eob[1];	\

	for (i = 0; i < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff); ++i)
		for (j = 0; j < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0]); ++j)
			for (k = 0; k < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0]); ++k)
				for (l = 0; l < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0][0]); ++l)

#undef INNERMOST_LOOP
static int rkvdec_vp9_start(struct rkvdec_ctx *ctx)
	struct rkvdec_dev *rkvdec = ctx->dev;
	struct rkvdec_vp9_priv_tbl *priv_tbl;
	struct rkvdec_vp9_ctx *vp9_ctx;
	unsigned char *count_tbl;

	vp9_ctx = kzalloc(sizeof(*vp9_ctx), GFP_KERNEL);

	ctx->priv = vp9_ctx;

	priv_tbl = dma_alloc_coherent(rkvdec->dev, sizeof(*priv_tbl),
				      &vp9_ctx->priv_tbl.dma, GFP_KERNEL);

	vp9_ctx->priv_tbl.size = sizeof(*priv_tbl);
	vp9_ctx->priv_tbl.cpu = priv_tbl;
	memset(priv_tbl, 0, sizeof(*priv_tbl));

	count_tbl = dma_alloc_coherent(rkvdec->dev, RKVDEC_VP9_COUNT_SIZE,
				       &vp9_ctx->count_tbl.dma, GFP_KERNEL);
		goto err_free_priv_tbl;

	vp9_ctx->count_tbl.size = RKVDEC_VP9_COUNT_SIZE;
	vp9_ctx->count_tbl.cpu = count_tbl;
	memset(count_tbl, 0, RKVDEC_VP9_COUNT_SIZE);
	rkvdec_init_v4l2_vp9_count_tbl(ctx);

	dma_free_coherent(rkvdec->dev, vp9_ctx->priv_tbl.size,
			  vp9_ctx->priv_tbl.cpu, vp9_ctx->priv_tbl.dma);
static void rkvdec_vp9_stop(struct rkvdec_ctx *ctx)
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_dev *rkvdec = ctx->dev;

	dma_free_coherent(rkvdec->dev, vp9_ctx->count_tbl.size,
			  vp9_ctx->count_tbl.cpu, vp9_ctx->count_tbl.dma);
	dma_free_coherent(rkvdec->dev, vp9_ctx->priv_tbl.size,
			  vp9_ctx->priv_tbl.cpu, vp9_ctx->priv_tbl.dma);
static int rkvdec_vp9_adjust_fmt(struct rkvdec_ctx *ctx,
				 struct v4l2_format *f)
	struct v4l2_pix_format_mplane *fmt = &f->fmt.pix_mp;
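
	/*
	 * If userspace did not suggest a bitstream buffer size, fall back to
	 * two bytes per pixel of the coded resolution, a generous upper
	 * bound for a single compressed VP9 frame (a description of the
	 * default below, not a guarantee from the VP9 spec).
	 */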
	fmt->num_planes = 1;
	if (!fmt->plane_fmt[0].sizeimage)
		fmt->plane_fmt[0].sizeimage = fmt->width * fmt->height * 2;
const struct rkvdec_coded_fmt_ops rkvdec_vp9_fmt_ops = {
	.adjust_fmt = rkvdec_vp9_adjust_fmt,
	.start = rkvdec_vp9_start,
	.stop = rkvdec_vp9_stop,
	.run = rkvdec_vp9_run,
	.done = rkvdec_vp9_done,