2 * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
4 * Copyright (c) 2013 Texas Instruments Inc.
5 * David Griego, <dagriego@biglakesoftware.com>
6 * Dale Farnsworth, <dale@farnsworth.org>
7 * Archit Taneja, <archit@ti.com>
9 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
10 * Pawel Osciak, <pawel@osciak.com>
11 * Marek Szyprowski, <m.szyprowski@samsung.com>
13 * Based on the virtual v4l2-mem2mem example device
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License version 2 as published by
17 * the Free Software Foundation
20 #include <linux/delay.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/err.h>
24 #include <linux/interrupt.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
29 #include <linux/platform_device.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <linux/videodev2.h>
34 #include <linux/log2.h>
35 #include <linux/sizes.h>
37 #include <media/v4l2-common.h>
38 #include <media/v4l2-ctrls.h>
39 #include <media/v4l2-device.h>
40 #include <media/v4l2-event.h>
41 #include <media/v4l2-ioctl.h>
42 #include <media/v4l2-mem2mem.h>
43 #include <media/videobuf2-v4l2.h>
44 #include <media/videobuf2-dma-contig.h>
51 #define VPE_MODULE_NAME "vpe"
53 /* minimum and maximum frame sizes */
59 /* required alignments */
60 #define S_ALIGN 0 /* multiple of 1 */
61 #define H_ALIGN 1 /* multiple of 2 */
63 /* flags that indicate a format can be used for capture/output */
64 #define VPE_FMT_TYPE_CAPTURE (1 << 0)
65 #define VPE_FMT_TYPE_OUTPUT (1 << 1)
67 /* used as plane indices */
68 #define VPE_MAX_PLANES 2
72 /* per m2m context info */
73 #define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */
75 #define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
78 * each VPE context may need up to 3 config descriptors, 7 input descriptors,
79 * 3 output descriptors, and 10 control descriptors
81 #define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
82 13 * VPDMA_CFD_CTD_DESC_SIZE)
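/*
 * A quick sanity check of the arithmetic above: the 7 input and 3 output
 * descriptors account for the 10 data transfer descriptors (DTDs), and the
 * 3 config plus 10 control descriptors account for the 13 CFD/CTD entries.
 */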
84 #define vpe_dbg(vpedev, fmt, arg...) \
85 dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
86 #define vpe_err(vpedev, fmt, arg...) \
87 dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
89 struct vpe_us_coeffs {
90 unsigned short anchor_fid0_c0;
91 unsigned short anchor_fid0_c1;
92 unsigned short anchor_fid0_c2;
93 unsigned short anchor_fid0_c3;
94 unsigned short interp_fid0_c0;
95 unsigned short interp_fid0_c1;
96 unsigned short interp_fid0_c2;
97 unsigned short interp_fid0_c3;
98 unsigned short anchor_fid1_c0;
99 unsigned short anchor_fid1_c1;
100 unsigned short anchor_fid1_c2;
101 unsigned short anchor_fid1_c3;
102 unsigned short interp_fid1_c0;
103 unsigned short interp_fid1_c1;
104 unsigned short interp_fid1_c2;
105 unsigned short interp_fid1_c3;
109 * Default upsampler coefficients
111 static const struct vpe_us_coeffs us_coeffs[] = {
113 /* Coefficients for progressive input */
114 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
115 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
118 /* Coefficients for Top Field Interlaced input */
119 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
120 /* Coefficients for Bottom Field Interlaced input */
121 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
126 * the following registers configure some of the parameters of the
127 * motion and edge detection blocks inside DEI. These generally remain the
128 * same, but could be exposed later via userspace if someone needs to tweak them.
130 struct vpe_dei_regs {
131 unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */
132 unsigned long edi_config_reg; /* VPE_DEI_REG3 */
133 unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */
134 unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */
135 unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */
136 unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */
140 * default expert DEI register values, unlikely to be modified.
142 static const struct vpe_dei_regs dei_regs = {
143 .mdt_spacial_freq_thr_reg = 0x020C0804u,
144 .edi_config_reg = 0x0118100Fu,
145 .edi_lut_reg0 = 0x08040200u,
146 .edi_lut_reg1 = 0x1010100Cu,
147 .edi_lut_reg2 = 0x10101010u,
148 .edi_lut_reg3 = 0x10101010u,
152 * The port_data structure contains per-port data.
154 struct vpe_port_data {
155 enum vpdma_channel channel; /* VPDMA channel */
156 u8 vb_index; /* input frame f, f-1, f-2 index */
157 u8 vb_part; /* plane index for co-planar formats */
161 * Define indices into the port_data tables
163 #define VPE_PORT_LUMA1_IN 0
164 #define VPE_PORT_CHROMA1_IN 1
165 #define VPE_PORT_LUMA2_IN 2
166 #define VPE_PORT_CHROMA2_IN 3
167 #define VPE_PORT_LUMA3_IN 4
168 #define VPE_PORT_CHROMA3_IN 5
169 #define VPE_PORT_MV_IN 6
170 #define VPE_PORT_MV_OUT 7
171 #define VPE_PORT_LUMA_OUT 8
172 #define VPE_PORT_CHROMA_OUT 9
173 #define VPE_PORT_RGB_OUT 10
175 static const struct vpe_port_data port_data[11] = {
176 [VPE_PORT_LUMA1_IN] = {
177 .channel = VPE_CHAN_LUMA1_IN,
181 [VPE_PORT_CHROMA1_IN] = {
182 .channel = VPE_CHAN_CHROMA1_IN,
184 .vb_part = VPE_CHROMA,
186 [VPE_PORT_LUMA2_IN] = {
187 .channel = VPE_CHAN_LUMA2_IN,
191 [VPE_PORT_CHROMA2_IN] = {
192 .channel = VPE_CHAN_CHROMA2_IN,
194 .vb_part = VPE_CHROMA,
196 [VPE_PORT_LUMA3_IN] = {
197 .channel = VPE_CHAN_LUMA3_IN,
201 [VPE_PORT_CHROMA3_IN] = {
202 .channel = VPE_CHAN_CHROMA3_IN,
204 .vb_part = VPE_CHROMA,
207 .channel = VPE_CHAN_MV_IN,
209 [VPE_PORT_MV_OUT] = {
210 .channel = VPE_CHAN_MV_OUT,
212 [VPE_PORT_LUMA_OUT] = {
213 .channel = VPE_CHAN_LUMA_OUT,
216 [VPE_PORT_CHROMA_OUT] = {
217 .channel = VPE_CHAN_CHROMA_OUT,
218 .vb_part = VPE_CHROMA,
220 [VPE_PORT_RGB_OUT] = {
221 .channel = VPE_CHAN_RGB_OUT,
227 /* driver info for each of the supported video formats */
229 char *name; /* human-readable name */
230 u32 fourcc; /* standard format identifier */
231 u8 types; /* CAPTURE and/or OUTPUT */
232 u8 coplanar; /* set for unpacked Luma and Chroma */
233 /* vpdma format info for each plane */
234 struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
237 static struct vpe_fmt vpe_formats[] = {
239 .name = "YUV 422 co-planar",
240 .fourcc = V4L2_PIX_FMT_NV16,
241 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
243 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
244 &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
248 .name = "YUV 420 co-planar",
249 .fourcc = V4L2_PIX_FMT_NV12,
250 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
252 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
253 &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
257 .name = "YUYV 422 packed",
258 .fourcc = V4L2_PIX_FMT_YUYV,
259 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
261 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
265 .name = "UYVY 422 packed",
266 .fourcc = V4L2_PIX_FMT_UYVY,
267 .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
269 .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
273 .name = "RGB888 packed",
274 .fourcc = V4L2_PIX_FMT_RGB24,
275 .types = VPE_FMT_TYPE_CAPTURE,
277 .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
282 .fourcc = V4L2_PIX_FMT_RGB32,
283 .types = VPE_FMT_TYPE_CAPTURE,
285 .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
289 .name = "BGR888 packed",
290 .fourcc = V4L2_PIX_FMT_BGR24,
291 .types = VPE_FMT_TYPE_CAPTURE,
293 .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
298 .fourcc = V4L2_PIX_FMT_BGR32,
299 .types = VPE_FMT_TYPE_CAPTURE,
301 .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
307 * per-queue, driver-specific private data.
308 * there is one source queue and one destination queue for each m2m context.
311 unsigned int width; /* frame width */
312 unsigned int height; /* frame height */
313 unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
314 enum v4l2_colorspace colorspace;
315 enum v4l2_field field; /* supported field value */
317 unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
318 struct v4l2_rect c_rect; /* crop/compose rectangle */
319 struct vpe_fmt *fmt; /* format info */
322 /* vpe_q_data flag bits */
323 #define Q_DATA_FRAME_1D (1 << 0)
324 #define Q_DATA_MODE_TILED (1 << 1)
325 #define Q_DATA_INTERLACED (1 << 2)
332 /* find our format description corresponding to the passed v4l2_format */
333 static struct vpe_fmt *__find_format(u32 fourcc)
338 for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
339 fmt = &vpe_formats[k];
340 if (fmt->fourcc == fourcc)
347 static struct vpe_fmt *find_format(struct v4l2_format *f)
349 return __find_format(f->fmt.pix.pixelformat);
353 * there is one vpe_dev structure in the driver; it is shared by
357 struct v4l2_device v4l2_dev;
358 struct video_device vfd;
359 struct v4l2_m2m_dev *m2m_dev;
361 atomic_t num_instances; /* count of driver instances */
362 dma_addr_t loaded_mmrs; /* shadow mmrs in device */
363 struct mutex dev_mutex;
368 struct resource *res;
370 struct vb2_alloc_ctx *alloc_ctx;
371 struct vpdma_data *vpdma; /* vpdma data handle */
372 struct sc_data *sc; /* scaler data handle */
373 struct csc_data *csc; /* csc data handle */
377 * There is one vpe_ctx structure for each m2m context.
382 struct v4l2_ctrl_handler hdl;
384 unsigned int field; /* current field */
385 unsigned int sequence; /* current frame/field seq */
386 unsigned int aborting; /* abort after next irq */
388 unsigned int bufs_per_job; /* input buffers per batch */
389 unsigned int bufs_completed; /* bufs done in this batch */
391 struct vpe_q_data q_data[2]; /* src & dst queue data */
392 struct vb2_v4l2_buffer *src_vbs[VPE_MAX_SRC_BUFS];
393 struct vb2_v4l2_buffer *dst_vb;
395 dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */
396 void *mv_buf[2]; /* virtual addrs of motion vector bufs */
397 size_t mv_buf_size; /* current motion vector buffer size */
398 struct vpdma_buf mmr_adb; /* shadow reg addr/data block */
399 struct vpdma_buf sc_coeff_h; /* h coeff buffer */
400 struct vpdma_buf sc_coeff_v; /* v coeff buffer */
401 struct vpdma_desc_list desc_list; /* DMA descriptor list */
403 bool deinterlacing; /* using de-interlacer */
404 bool load_mmrs; /* have new shadow reg values */
406 unsigned int src_mv_buf_selector;
411 * M2M devices get 2 queues.
412 * Return the queue given the type.
414 static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
415 enum v4l2_buf_type type)
418 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
419 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
420 return &ctx->q_data[Q_DATA_SRC];
421 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
422 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
423 return &ctx->q_data[Q_DATA_DST];
430 static u32 read_reg(struct vpe_dev *dev, int offset)
432 return ioread32(dev->base + offset);
435 static void write_reg(struct vpe_dev *dev, int offset, u32 value)
437 iowrite32(value, dev->base + offset);
440 /* register field read/write helpers */
441 static int get_field(u32 value, u32 mask, int shift)
443 return (value & (mask << shift)) >> shift;
446 static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
448 return get_field(read_reg(dev, offset), mask, shift);
451 static void write_field(u32 *valp, u32 field, u32 mask, int shift)
455 val &= ~(mask << shift);
456 val |= (field & mask) << shift;
460 static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
463 u32 val = read_reg(dev, offset);
465 write_field(&val, field, mask, shift);
467 write_reg(dev, offset, val);
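/*
 * Worked example of the helpers above: with mask 0xf and shift 4,
 * get_field(0x0230, 0xf, 4) returns 0x3, and write_field(&val, 0x5, 0xf, 4)
 * clears bits 7:4 of val and replaces them with 0x5, leaving all other
 * bits untouched.
 */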
471 * DMA address/data block for the shadow registers
474 struct vpdma_adb_hdr out_fmt_hdr;
477 struct vpdma_adb_hdr us1_hdr;
479 struct vpdma_adb_hdr us2_hdr;
481 struct vpdma_adb_hdr us3_hdr;
483 struct vpdma_adb_hdr dei_hdr;
485 struct vpdma_adb_hdr sc_hdr0;
488 struct vpdma_adb_hdr sc_hdr8;
491 struct vpdma_adb_hdr sc_hdr17;
494 struct vpdma_adb_hdr csc_hdr;
499 #define GET_OFFSET_TOP(ctx, obj, reg) \
500 ((obj)->res->start - ctx->dev->res->start + reg)
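/*
 * e.g. GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0) translates the scaler's
 * CFG_SC0 register into an offset from the vpe_top base by taking the
 * difference of the two sub-blocks' resource start addresses.
 */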
502 #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
503 VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
505 * Set the headers for all of the address/data block structures.
507 static void init_adb_hdrs(struct vpe_ctx *ctx)
509 VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
510 VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
511 VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
512 VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
513 VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
514 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
515 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
516 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
517 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
518 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
519 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
520 VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
521 GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
525 * Allocate or re-allocate the motion vector DMA buffers
526 * There are two buffers, one for input and one for output.
527 * However, the roles are reversed after each field is processed.
528 * In other words, after each field is processed, the previous
529 * output (dst) MV buffer becomes the new input (src) MV buffer.
531 static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
533 struct device *dev = ctx->dev->v4l2_dev.dev;
535 if (ctx->mv_buf_size == size)
539 dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
543 dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
549 ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
551 if (!ctx->mv_buf[0]) {
552 vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
556 ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
558 if (!ctx->mv_buf[1]) {
559 vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
560 dma_free_coherent(dev, size, ctx->mv_buf[0],
566 ctx->mv_buf_size = size;
567 ctx->src_mv_buf_selector = 0;
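/*
 * Sketch of the ping-pong scheme: with src_mv_buf_selector == s, the
 * de-interlacer reads motion vectors from mv_buf[s] (VPE_PORT_MV_IN) and
 * writes new ones to mv_buf[!s] (VPE_PORT_MV_OUT); the irq handler then
 * toggles the selector so the buffer just written becomes the next input.
 */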
572 static void free_mv_buffers(struct vpe_ctx *ctx)
574 realloc_mv_buffers(ctx, 0);
578 * While de-interlacing, we keep the two most recent input buffers
579 * around. This function frees those two buffers when we have
580 * finished processing the current stream.
582 static void free_vbs(struct vpe_ctx *ctx)
584 struct vpe_dev *dev = ctx->dev;
587 if (ctx->src_vbs[2] == NULL)
590 spin_lock_irqsave(&dev->lock, flags);
591 if (ctx->src_vbs[2]) {
592 v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
593 v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
595 spin_unlock_irqrestore(&dev->lock, flags);
599 * Enable or disable the VPE clocks
601 static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
606 val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
607 write_reg(dev, VPE_CLK_ENABLE, val);
610 static void vpe_top_reset(struct vpe_dev *dev)
613 write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
614 VPE_DATA_PATH_CLK_RESET_SHIFT);
616 usleep_range(100, 150);
618 write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
619 VPE_DATA_PATH_CLK_RESET_SHIFT);
622 static void vpe_top_vpdma_reset(struct vpe_dev *dev)
624 write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
625 VPE_VPDMA_CLK_RESET_SHIFT);
627 usleep_range(100, 150);
629 write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
630 VPE_VPDMA_CLK_RESET_SHIFT);
634 * Load the correct set of upsampler coefficients into the shadow MMRs
636 static void set_us_coefficients(struct vpe_ctx *ctx)
638 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
639 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
640 u32 *us1_reg = &mmr_adb->us1_regs[0];
641 u32 *us2_reg = &mmr_adb->us2_regs[0];
642 u32 *us3_reg = &mmr_adb->us3_regs[0];
643 const unsigned short *cp, *end_cp;
645 cp = &us_coeffs[0].anchor_fid0_c0;
647 if (s_q_data->flags & Q_DATA_INTERLACED) /* interlaced */
648 cp += sizeof(us_coeffs[0]) / sizeof(*cp);
650 end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
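/*
 * sizeof(us_coeffs[0]) / sizeof(*cp) is 16 (the struct holds 16 u16
 * coefficients), so the conditional increment above skips the progressive
 * entry and selects the interlaced entry of us_coeffs[] for interlaced
 * sources.
 */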
652 while (cp < end_cp) {
653 write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
654 write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
655 *us2_reg++ = *us1_reg;
656 *us3_reg++ = *us1_reg++;
658 ctx->load_mmrs = true;
662 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
664 static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
666 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
667 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
668 u32 *us1_reg0 = &mmr_adb->us1_regs[0];
669 u32 *us2_reg0 = &mmr_adb->us2_regs[0];
670 u32 *us3_reg0 = &mmr_adb->us3_regs[0];
675 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
676 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
679 if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
681 line_mode = 0; /* double lines to line buffer */
684 write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
685 write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
686 write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
689 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
690 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
691 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);
693 /* frame start for input luma */
694 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
696 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
698 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
701 /* frame start for input chroma */
702 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
703 VPE_CHAN_CHROMA1_IN);
704 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
705 VPE_CHAN_CHROMA2_IN);
706 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
707 VPE_CHAN_CHROMA3_IN);
709 /* frame start for MV in client */
710 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
713 ctx->load_mmrs = true;
717 * Set the shadow registers that are modified when the source
720 static void set_src_registers(struct vpe_ctx *ctx)
722 set_us_coefficients(ctx);
726 * Set the shadow registers that are modified when the destination
729 static void set_dst_registers(struct vpe_ctx *ctx)
731 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
732 enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
733 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
736 if (clrspc == V4L2_COLORSPACE_SRGB)
737 val |= VPE_RGB_OUT_SELECT;
738 else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
739 val |= VPE_COLOR_SEPARATE_422;
742 * the source of CHR_DS and CSC is always the scaler, irrespective of
743 * whether it's used or not
745 val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
747 if (fmt->fourcc != V4L2_PIX_FMT_NV12)
748 val |= VPE_DS_BYPASS;
750 mmr_adb->out_fmt_reg[0] = val;
752 ctx->load_mmrs = true;
756 * Set the de-interlacer shadow register values
758 static void set_dei_regs(struct vpe_ctx *ctx)
760 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
761 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
762 unsigned int src_h = s_q_data->c_rect.height;
763 unsigned int src_w = s_q_data->c_rect.width;
764 u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
765 bool deinterlace = true;
769 * according to the TRM, we should set DEI in progressive bypass mode when
770 * the input content is progressive. However, DEI is bypassed correctly
771 * for both progressive and interlaced content in interlace bypass mode,
772 * and it has been recommended not to use progressive bypass mode.
774 if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
775 !(s_q_data->flags & Q_DATA_INTERLACED)) {
777 val = VPE_DEI_INTERLACE_BYPASS;
780 src_h = deinterlace ? src_h * 2 : src_h;
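/* e.g. a 480i source: a 240-line field crop yields a 480-line DEI frame */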
782 val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
783 (src_w << VPE_DEI_WIDTH_SHIFT) |
788 ctx->load_mmrs = true;
791 static void set_dei_shadow_registers(struct vpe_ctx *ctx)
793 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
794 u32 *dei_mmr = &mmr_adb->dei_regs[0];
795 const struct vpe_dei_regs *cur = &dei_regs;
797 dei_mmr[2] = cur->mdt_spacial_freq_thr_reg;
798 dei_mmr[3] = cur->edi_config_reg;
799 dei_mmr[4] = cur->edi_lut_reg0;
800 dei_mmr[5] = cur->edi_lut_reg1;
801 dei_mmr[6] = cur->edi_lut_reg2;
802 dei_mmr[7] = cur->edi_lut_reg3;
804 ctx->load_mmrs = true;
808 * Set the shadow registers whose values are modified when either the
809 * source or destination format is changed.
811 static int set_srcdst_params(struct vpe_ctx *ctx)
813 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
814 struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
815 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
816 unsigned int src_w = s_q_data->c_rect.width;
817 unsigned int src_h = s_q_data->c_rect.height;
818 unsigned int dst_w = d_q_data->c_rect.width;
819 unsigned int dst_h = d_q_data->c_rect.height;
824 ctx->field = V4L2_FIELD_TOP;
826 if ((s_q_data->flags & Q_DATA_INTERLACED) &&
827 !(d_q_data->flags & Q_DATA_INTERLACED)) {
829 const struct vpdma_data_format *mv =
830 &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
833 * we make sure that the source image has a 16 byte aligned
834 * stride; we need to do the same for the motion vector buffer
835 * by aligning its stride to the next 16 byte boundary. This
836 * extra space will not be used by the de-interlacer, but it will
837 * ensure that vpdma operates correctly
839 bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
841 mv_buf_size = bytes_per_line * s_q_data->height;
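/*
 * Worked example, assuming the vpdma MV format is 4 bits per pixel: for a
 * 1920x1080 source, bytes_per_line = ALIGN((1920 * 4) >> 3, 16) = 960,
 * which is already 16 byte aligned, so mv_buf_size = 960 * 1080 bytes.
 */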
843 ctx->deinterlacing = true;
846 ctx->deinterlacing = false;
852 ret = realloc_mv_buffers(ctx, mv_buf_size);
856 set_cfg_and_line_modes(ctx);
859 csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
860 s_q_data->colorspace, d_q_data->colorspace);
862 sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
863 sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
865 sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
866 &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
867 src_w, src_h, dst_w, dst_h);
873 * Return the vpe_ctx structure for a given struct file
875 static struct vpe_ctx *file2ctx(struct file *file)
877 return container_of(file->private_data, struct vpe_ctx, fh);
885 * job_ready() - check whether an instance is ready to be scheduled to run
887 static int job_ready(void *priv)
889 struct vpe_ctx *ctx = priv;
890 int needed = ctx->bufs_per_job;
892 if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
893 needed += 2; /* need additional two most recent fields */
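/*
 * Example: with bufs_per_job == 1 and de-interlacing enabled, the first
 * job isn't scheduled until at least 3 buffers are ready on each queue
 * (source fields f, f-1, f-2); once the two most recent fields are held
 * back in src_vbs[], subsequent jobs need only bufs_per_job new buffers.
 */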
895 if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < needed)
898 if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < needed)
904 static void job_abort(void *priv)
906 struct vpe_ctx *ctx = priv;
908 /* Will cancel the transaction in the next interrupt handler */
913 * Lock access to the device
915 static void vpe_lock(void *priv)
917 struct vpe_ctx *ctx = priv;
918 struct vpe_dev *dev = ctx->dev;
919 mutex_lock(&dev->dev_mutex);
922 static void vpe_unlock(void *priv)
924 struct vpe_ctx *ctx = priv;
925 struct vpe_dev *dev = ctx->dev;
926 mutex_unlock(&dev->dev_mutex);
929 static void vpe_dump_regs(struct vpe_dev *dev)
931 #define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
933 vpe_dbg(dev, "VPE Registers:\n");
937 DUMPREG(INT0_STATUS0_RAW);
938 DUMPREG(INT0_STATUS0);
939 DUMPREG(INT0_ENABLE0);
940 DUMPREG(INT0_STATUS1_RAW);
941 DUMPREG(INT0_STATUS1);
942 DUMPREG(INT0_ENABLE1);
945 DUMPREG(CLK_FORMAT_SELECT);
946 DUMPREG(CLK_RANGE_MAP);
971 DUMPREG(DEI_FRAME_SIZE);
973 DUMPREG(MDT_SF_THRESHOLD);
975 DUMPREG(DEI_EDI_LUT_R0);
976 DUMPREG(DEI_EDI_LUT_R1);
977 DUMPREG(DEI_EDI_LUT_R2);
978 DUMPREG(DEI_EDI_LUT_R3);
979 DUMPREG(DEI_FMD_WINDOW_R0);
980 DUMPREG(DEI_FMD_WINDOW_R1);
981 DUMPREG(DEI_FMD_CONTROL_R0);
982 DUMPREG(DEI_FMD_CONTROL_R1);
983 DUMPREG(DEI_FMD_STATUS_R0);
984 DUMPREG(DEI_FMD_STATUS_R1);
985 DUMPREG(DEI_FMD_STATUS_R2);
988 sc_dump_regs(dev->sc);
989 csc_dump_regs(dev->csc);
992 static void add_out_dtd(struct vpe_ctx *ctx, int port)
994 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
995 const struct vpe_port_data *p_data = &port_data[port];
996 struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
997 struct vpe_fmt *fmt = q_data->fmt;
998 const struct vpdma_data_format *vpdma_fmt;
999 int mv_buf_selector = !ctx->src_mv_buf_selector;
1000 dma_addr_t dma_addr;
1003 if (port == VPE_PORT_MV_OUT) {
1004 vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
1005 dma_addr = ctx->mv_buf_dma[mv_buf_selector];
1007 /* to incorporate interleaved formats */
1008 int plane = fmt->coplanar ? p_data->vb_part : 0;
1010 vpdma_fmt = fmt->vpdma_fmt[plane];
1011 dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
1014 "acquiring output buffer(%d) dma_addr failed\n",
1020 if (q_data->flags & Q_DATA_FRAME_1D)
1021 flags |= VPDMA_DATA_FRAME_1D;
1022 if (q_data->flags & Q_DATA_MODE_TILED)
1023 flags |= VPDMA_DATA_MODE_TILED;
1025 vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
1026 vpdma_fmt, dma_addr, p_data->channel, flags);
1029 static void add_in_dtd(struct vpe_ctx *ctx, int port)
1031 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
1032 const struct vpe_port_data *p_data = &port_data[port];
1033 struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
1034 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1035 struct vpe_fmt *fmt = q_data->fmt;
1036 const struct vpdma_data_format *vpdma_fmt;
1037 int mv_buf_selector = ctx->src_mv_buf_selector;
1038 int field = vbuf->field == V4L2_FIELD_BOTTOM;
1039 int frame_width, frame_height;
1040 dma_addr_t dma_addr;
1043 if (port == VPE_PORT_MV_IN) {
1044 vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
1045 dma_addr = ctx->mv_buf_dma[mv_buf_selector];
1047 /* to incorporate interleaved formats */
1048 int plane = fmt->coplanar ? p_data->vb_part : 0;
1050 vpdma_fmt = fmt->vpdma_fmt[plane];
1052 dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
1055 "acquiring input buffer(%d) dma_addr failed\n",
1061 if (q_data->flags & Q_DATA_FRAME_1D)
1062 flags |= VPDMA_DATA_FRAME_1D;
1063 if (q_data->flags & Q_DATA_MODE_TILED)
1064 flags |= VPDMA_DATA_MODE_TILED;
1066 frame_width = q_data->c_rect.width;
1067 frame_height = q_data->c_rect.height;
1069 if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
1072 vpdma_add_in_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
1073 vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
1074 frame_height, 0, 0);
1078 * Enable the expected IRQ sources
1080 static void enable_irqs(struct vpe_ctx *ctx)
1082 write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
1083 write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
1084 VPE_DS1_UV_ERROR_INT);
1086 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
1089 static void disable_irqs(struct vpe_ctx *ctx)
1091 write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
1092 write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
1094 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
1097 /* device_run() - prepares and starts the device
1099 * This function is only called when both the source and destination
1100 * buffers are in place.
1102 static void device_run(void *priv)
1104 struct vpe_ctx *ctx = priv;
1105 struct sc_data *sc = ctx->dev->sc;
1106 struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
1108 if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
1109 ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1110 WARN_ON(ctx->src_vbs[2] == NULL);
1111 ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1112 WARN_ON(ctx->src_vbs[1] == NULL);
1115 ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1116 WARN_ON(ctx->src_vbs[0] == NULL);
1117 ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
1118 WARN_ON(ctx->dst_vb == NULL);
1120 /* config descriptors */
1121 if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
1122 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
1123 vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
1124 ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
1125 ctx->load_mmrs = false;
1128 if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
1130 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
1131 vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
1132 &ctx->sc_coeff_h, 0);
1134 sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
1135 sc->load_coeff_h = false;
1138 if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
1140 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
1141 vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
1142 &ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
1144 sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
1145 sc->load_coeff_v = false;
1148 /* output data descriptors */
1149 if (ctx->deinterlacing)
1150 add_out_dtd(ctx, VPE_PORT_MV_OUT);
1152 if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
1153 add_out_dtd(ctx, VPE_PORT_RGB_OUT);
1155 add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
1156 if (d_q_data->fmt->coplanar)
1157 add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
1160 /* input data descriptors */
1161 if (ctx->deinterlacing) {
1162 add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
1163 add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);
1165 add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
1166 add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
1169 add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
1170 add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);
1172 if (ctx->deinterlacing)
1173 add_in_dtd(ctx, VPE_PORT_MV_IN);
1175 /* sync on channel control descriptors for input ports */
1176 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
1177 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);
1179 if (ctx->deinterlacing) {
1180 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1182 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1183 VPE_CHAN_CHROMA2_IN);
1185 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1187 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1188 VPE_CHAN_CHROMA3_IN);
1190 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
1193 /* sync on channel control descriptors for output ports */
1194 if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
1195 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1198 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1200 if (d_q_data->fmt->coplanar)
1201 vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1202 VPE_CHAN_CHROMA_OUT);
1205 if (ctx->deinterlacing)
1206 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
1210 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
1211 vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
1214 static void dei_error(struct vpe_ctx *ctx)
1216 dev_warn(ctx->dev->v4l2_dev.dev,
1217 "received DEI error interrupt\n");
1220 static void ds1_uv_error(struct vpe_ctx *ctx)
1222 dev_warn(ctx->dev->v4l2_dev.dev,
1223 "received downsampler error interrupt\n");
1226 static irqreturn_t vpe_irq(int irq_vpe, void *data)
1228 struct vpe_dev *dev = (struct vpe_dev *)data;
1229 struct vpe_ctx *ctx;
1230 struct vpe_q_data *d_q_data;
1231 struct vb2_v4l2_buffer *s_vb, *d_vb;
1232 unsigned long flags;
1235 irqst0 = read_reg(dev, VPE_INT0_STATUS0);
1237 write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
1238 vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
1241 irqst1 = read_reg(dev, VPE_INT0_STATUS1);
1243 write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
1244 vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
1247 ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
1249 vpe_err(dev, "instance released before end of transaction\n");
1254 if (irqst1 & VPE_DEI_ERROR_INT) {
1255 irqst1 &= ~VPE_DEI_ERROR_INT;
1258 if (irqst1 & VPE_DS1_UV_ERROR_INT) {
1259 irqst1 &= ~VPE_DS1_UV_ERROR_INT;
1265 if (irqst0 & VPE_INT0_LIST0_COMPLETE)
1266 vpdma_clear_list_stat(ctx->dev->vpdma);
1268 irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
1271 if (irqst0 | irqst1) {
1272 dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
1273 "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
1279 vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
1280 vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
1281 vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
1282 vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
1284 vpdma_reset_desc_list(&ctx->desc_list);
1286 /* the previous dst mv buffer becomes the next src mv buffer */
1287 ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
1292 s_vb = ctx->src_vbs[0];
1295 d_vb->flags = s_vb->flags;
1296 d_vb->timestamp = s_vb->timestamp;
1298 if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
1299 d_vb->timecode = s_vb->timecode;
1301 d_vb->sequence = ctx->sequence;
1302 s_vb->sequence = ctx->sequence;
1304 d_q_data = &ctx->q_data[Q_DATA_DST];
1305 if (d_q_data->flags & Q_DATA_INTERLACED) {
1306 d_vb->field = ctx->field;
1307 if (ctx->field == V4L2_FIELD_BOTTOM) {
1309 ctx->field = V4L2_FIELD_TOP;
1311 WARN_ON(ctx->field != V4L2_FIELD_TOP);
1312 ctx->field = V4L2_FIELD_BOTTOM;
1315 d_vb->field = V4L2_FIELD_NONE;
1319 if (ctx->deinterlacing)
1320 s_vb = ctx->src_vbs[2];
1322 spin_lock_irqsave(&dev->lock, flags);
1323 v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
1324 v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
1325 spin_unlock_irqrestore(&dev->lock, flags);
1327 if (ctx->deinterlacing) {
1328 ctx->src_vbs[2] = ctx->src_vbs[1];
1329 ctx->src_vbs[1] = ctx->src_vbs[0];
1332 ctx->bufs_completed++;
1333 if (ctx->bufs_completed < ctx->bufs_per_job) {
1339 vpe_dbg(ctx->dev, "finishing transaction\n");
1340 ctx->bufs_completed = 0;
1341 v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
1349 static int vpe_querycap(struct file *file, void *priv,
1350 struct v4l2_capability *cap)
1352 strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
1353 strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
1354 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
1356 cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
1357 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
1361 static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
1364 struct vpe_fmt *fmt = NULL;
1367 for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
1368 if (vpe_formats[i].types & type) {
1369 if (index == f->index) {
1370 fmt = &vpe_formats[i];
1380 strncpy(f->description, fmt->name, sizeof(f->description) - 1);
1381 f->pixelformat = fmt->fourcc;
1385 static int vpe_enum_fmt(struct file *file, void *priv,
1386 struct v4l2_fmtdesc *f)
1388 if (V4L2_TYPE_IS_OUTPUT(f->type))
1389 return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
1391 return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
1394 static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1396 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1397 struct vpe_ctx *ctx = file2ctx(file);
1398 struct vb2_queue *vq;
1399 struct vpe_q_data *q_data;
1402 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1406 q_data = get_q_data(ctx, f->type);
1408 pix->width = q_data->width;
1409 pix->height = q_data->height;
1410 pix->pixelformat = q_data->fmt->fourcc;
1411 pix->field = q_data->field;
1413 if (V4L2_TYPE_IS_OUTPUT(f->type)) {
1414 pix->colorspace = q_data->colorspace;
1416 struct vpe_q_data *s_q_data;
1418 /* get colorspace from the source queue */
1419 s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
1421 pix->colorspace = s_q_data->colorspace;
1424 pix->num_planes = q_data->fmt->coplanar ? 2 : 1;
1426 for (i = 0; i < pix->num_planes; i++) {
1427 pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
1428 pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
1434 static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
1435 struct vpe_fmt *fmt, int type)
1437 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1438 struct v4l2_plane_pix_format *plane_fmt;
1439 unsigned int w_align;
1440 int i, depth, depth_bytes;
1442 if (!fmt || !(fmt->types & type)) {
1443 vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
1445 fmt = __find_format(V4L2_PIX_FMT_YUYV);
1448 if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
1449 pix->field = V4L2_FIELD_NONE;
1451 depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
1454 * the line stride should be 16 byte aligned for VPDMA to work. Based on
1455 * the bytes per pixel, figure out how much the width should be aligned
1456 * to make sure the line stride is 16 byte aligned
1458 depth_bytes = depth >> 3;
1460 if (depth_bytes == 3)
1462 * if bpp is 3 (as in some RGB formats), aligning the pixel width doesn't
1463 * really help in ensuring the line stride is 16 byte aligned
1468 * for the remaining bpp values (4, 2 and 1), pixel width alignment
1469 * can ensure a line stride alignment of 16 bytes. For example,
1470 * if bpp is 2, then the line stride is 16 byte aligned if
1471 * the width is aligned to 8 pixels (16 bytes)
1473 w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
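/*
 * Worked example, assuming VPDMA_DESC_ALIGN is the 16 byte alignment
 * referred to above: for YUYV (depth 16, depth_bytes 2), w_align =
 * order_base_2(16 / 2) = 3, so the width is aligned to 1 << 3 = 8 pixels
 * and the line stride to 8 * 2 = 16 bytes.
 */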
1475 v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
1476 &pix->height, MIN_H, MAX_H, H_ALIGN,
1479 pix->num_planes = fmt->coplanar ? 2 : 1;
1480 pix->pixelformat = fmt->fourcc;
1482 if (!pix->colorspace) {
1483 if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
1484 fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
1485 fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
1486 fmt->fourcc == V4L2_PIX_FMT_BGR32) {
1487 pix->colorspace = V4L2_COLORSPACE_SRGB;
1489 if (pix->height > 1280) /* HD */
1490 pix->colorspace = V4L2_COLORSPACE_REC709;
1492 pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
1496 memset(pix->reserved, 0, sizeof(pix->reserved));
1497 for (i = 0; i < pix->num_planes; i++) {
1498 plane_fmt = &pix->plane_fmt[i];
1499 depth = fmt->vpdma_fmt[i]->depth;
1502 plane_fmt->bytesperline = (pix->width * depth) >> 3;
1504 plane_fmt->bytesperline = pix->width;
1506 plane_fmt->sizeimage =
1507 (pix->height * pix->width * depth) >> 3;
1509 memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
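/*
 * Example (sketch, assuming vpdma reports a luma depth of 8 and a chroma
 * depth of 4 for NV12): a 1920x1080 NV12 buffer gets a luma plane of
 * 1920 * 1080 bytes and a chroma plane of (1920 * 1080 * 4) >> 3 bytes,
 * i.e. half the luma size.
 */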
1515 static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1517 struct vpe_ctx *ctx = file2ctx(file);
1518 struct vpe_fmt *fmt = find_format(f);
1520 if (V4L2_TYPE_IS_OUTPUT(f->type))
1521 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
1523 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
1526 static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
1528 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1529 struct v4l2_plane_pix_format *plane_fmt;
1530 struct vpe_q_data *q_data;
1531 struct vb2_queue *vq;
1534 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1538 if (vb2_is_busy(vq)) {
1539 vpe_err(ctx->dev, "queue busy\n");
1543 q_data = get_q_data(ctx, f->type);
1547 q_data->fmt = find_format(f);
1548 q_data->width = pix->width;
1549 q_data->height = pix->height;
1550 q_data->colorspace = pix->colorspace;
1551 q_data->field = pix->field;
1553 for (i = 0; i < pix->num_planes; i++) {
1554 plane_fmt = &pix->plane_fmt[i];
1556 q_data->bytesperline[i] = plane_fmt->bytesperline;
1557 q_data->sizeimage[i] = plane_fmt->sizeimage;
1560 q_data->c_rect.left = 0;
1561 q_data->c_rect.top = 0;
1562 q_data->c_rect.width = q_data->width;
1563 q_data->c_rect.height = q_data->height;
1565 if (q_data->field == V4L2_FIELD_ALTERNATE)
1566 q_data->flags |= Q_DATA_INTERLACED;
1568 q_data->flags &= ~Q_DATA_INTERLACED;
1570 vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
1571 f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
1572 q_data->bytesperline[VPE_LUMA]);
1573 if (q_data->fmt->coplanar)
1574 vpe_dbg(ctx->dev, " bpl_uv %d\n",
1575 q_data->bytesperline[VPE_CHROMA]);
1580 static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
1583 struct vpe_ctx *ctx = file2ctx(file);
1585 ret = vpe_try_fmt(file, priv, f);
1589 ret = __vpe_s_fmt(ctx, f);
1593 if (V4L2_TYPE_IS_OUTPUT(f->type))
1594 set_src_registers(ctx);
1596 set_dst_registers(ctx);
1598 return set_srcdst_params(ctx);
1601 static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
1603 struct vpe_q_data *q_data;
1605 if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1606 (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
1609 q_data = get_q_data(ctx, s->type);
1613 switch (s->target) {
1614 case V4L2_SEL_TGT_COMPOSE:
1616 * COMPOSE target is only valid for the capture buffer type; return an
1617 * error for the output buffer type
1619 if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1622 case V4L2_SEL_TGT_CROP:
1624 * CROP target is only valid for the output buffer type; return an
1625 * error for the capture buffer type
1627 if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1631 * bounds and default crop/compose targets are invalid targets to
1638 if (s->r.top < 0 || s->r.left < 0) {
1639 vpe_err(ctx->dev, "negative values for top and left\n");
1640 s->r.top = s->r.left = 0;
1643 v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
1644 &s->r.height, MIN_H, q_data->height, H_ALIGN, S_ALIGN);
1646 /* adjust left/top if cropping rectangle is out of bounds */
1647 if (s->r.left + s->r.width > q_data->width)
1648 s->r.left = q_data->width - s->r.width;
1649 if (s->r.top + s->r.height > q_data->height)
1650 s->r.top = q_data->height - s->r.height;
1655 static int vpe_g_selection(struct file *file, void *fh,
1656 struct v4l2_selection *s)
1658 struct vpe_ctx *ctx = file2ctx(file);
1659 struct vpe_q_data *q_data;
1660 bool use_c_rect = false;
1662 if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1663 (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
1666 q_data = get_q_data(ctx, s->type);
1670 switch (s->target) {
1671 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
1672 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
1673 if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1676 case V4L2_SEL_TGT_CROP_BOUNDS:
1677 case V4L2_SEL_TGT_CROP_DEFAULT:
1678 if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1681 case V4L2_SEL_TGT_COMPOSE:
1682 if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1686 case V4L2_SEL_TGT_CROP:
1687 if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1697 * for CROP/COMPOSE target type, return c_rect params from the
1698 * respective buffer type
1700 s->r = q_data->c_rect;
1703 * for DEFAULT/BOUNDS target type, return width and height from
1704 * S_FMT of the respective buffer type
1708 s->r.width = q_data->width;
1709 s->r.height = q_data->height;
1716 static int vpe_s_selection(struct file *file, void *fh,
1717 struct v4l2_selection *s)
1719 struct vpe_ctx *ctx = file2ctx(file);
1720 struct vpe_q_data *q_data;
1721 struct v4l2_selection sel = *s;
1724 ret = __vpe_try_selection(ctx, &sel);
1728 q_data = get_q_data(ctx, sel.type);
1732 if ((q_data->c_rect.left == sel.r.left) &&
1733 (q_data->c_rect.top == sel.r.top) &&
1734 (q_data->c_rect.width == sel.r.width) &&
1735 (q_data->c_rect.height == sel.r.height)) {
1737 "requested crop/compose values are already set\n");
1741 q_data->c_rect = sel.r;
1743 return set_srcdst_params(ctx);
1747 * defines the number of buffers/frames a context can process with VPE before
1748 * switching to a different context. The default value is 1 buffer per context
1750 #define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0)
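/*
 * A minimal userspace sketch (not part of the driver) of changing this
 * control, assuming fd is an open handle to the VPE video node and the
 * control id is visible via the UAPI headers:
 *
 *	struct v4l2_control ctrl = {
 *		.id = V4L2_CID_VPE_BUFS_PER_JOB,
 *		.value = 4,	// four buffers per transaction
 *	};
 *	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
 *		perror("VIDIOC_S_CTRL");
 */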
1752 static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
1754 struct vpe_ctx *ctx =
1755 container_of(ctrl->handler, struct vpe_ctx, hdl);
1758 case V4L2_CID_VPE_BUFS_PER_JOB:
1759 ctx->bufs_per_job = ctrl->val;
1763 vpe_err(ctx->dev, "Invalid control\n");
1770 static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
1771 .s_ctrl = vpe_s_ctrl,
1774 static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
1775 .vidioc_querycap = vpe_querycap,
1777 .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
1778 .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt,
1779 .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt,
1780 .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt,
1782 .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
1783 .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt,
1784 .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt,
1785 .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt,
1787 .vidioc_g_selection = vpe_g_selection,
1788 .vidioc_s_selection = vpe_s_selection,
1790 .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
1791 .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
1792 .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
1793 .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
1794 .vidioc_streamon = v4l2_m2m_ioctl_streamon,
1795 .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
1797 .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
1798 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1804 static int vpe_queue_setup(struct vb2_queue *vq,
1806 unsigned int *nbuffers, unsigned int *nplanes,
1807 unsigned int sizes[], void *alloc_ctxs[])
1810 struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
1811 struct vpe_q_data *q_data;
1813 q_data = get_q_data(ctx, vq->type);
1815 *nplanes = q_data->fmt->coplanar ? 2 : 1;
1817 for (i = 0; i < *nplanes; i++) {
1818 sizes[i] = q_data->sizeimage[i];
1819 alloc_ctxs[i] = ctx->dev->alloc_ctx;
1822 vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
1824 if (q_data->fmt->coplanar)
1825 vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
1830 static int vpe_buf_prepare(struct vb2_buffer *vb)
1832 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1833 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1834 struct vpe_q_data *q_data;
1837 vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
1839 q_data = get_q_data(ctx, vb->vb2_queue->type);
1840 num_planes = q_data->fmt->coplanar ? 2 : 1;
1842 if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1843 if (!(q_data->flags & Q_DATA_INTERLACED)) {
1844 vbuf->field = V4L2_FIELD_NONE;
1846 if (vbuf->field != V4L2_FIELD_TOP &&
1847 vbuf->field != V4L2_FIELD_BOTTOM)
1852 for (i = 0; i < num_planes; i++) {
1853 if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
1855 "data will not fit into plane (%lu < %lu)\n",
1856 vb2_plane_size(vb, i),
1857 (long) q_data->sizeimage[i]);
1862 for (i = 0; i < num_planes; i++)
1863 vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
1868 static void vpe_buf_queue(struct vb2_buffer *vb)
1870 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1871 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1873 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
1876 static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
1878 /* currently we do nothing here */
1883 static void vpe_stop_streaming(struct vb2_queue *q)
1885 struct vpe_ctx *ctx = vb2_get_drv_priv(q);
1887 vpe_dump_regs(ctx->dev);
1888 vpdma_dump_regs(ctx->dev->vpdma);
1891 static struct vb2_ops vpe_qops = {
1892 .queue_setup = vpe_queue_setup,
1893 .buf_prepare = vpe_buf_prepare,
1894 .buf_queue = vpe_buf_queue,
1895 .wait_prepare = vb2_ops_wait_prepare,
1896 .wait_finish = vb2_ops_wait_finish,
1897 .start_streaming = vpe_start_streaming,
1898 .stop_streaming = vpe_stop_streaming,
1901 static int queue_init(void *priv, struct vb2_queue *src_vq,
1902 struct vb2_queue *dst_vq)
1904 struct vpe_ctx *ctx = priv;
1905 struct vpe_dev *dev = ctx->dev;
1908 memset(src_vq, 0, sizeof(*src_vq));
1909 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1910 src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
1911 src_vq->drv_priv = ctx;
1912 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1913 src_vq->ops = &vpe_qops;
1914 src_vq->mem_ops = &vb2_dma_contig_memops;
1915 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1916 src_vq->lock = &dev->dev_mutex;
1918 ret = vb2_queue_init(src_vq);
1922 memset(dst_vq, 0, sizeof(*dst_vq));
1923 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1924 dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
1925 dst_vq->drv_priv = ctx;
1926 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1927 dst_vq->ops = &vpe_qops;
1928 dst_vq->mem_ops = &vb2_dma_contig_memops;
1929 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1930 dst_vq->lock = &dev->dev_mutex;
1932 return vb2_queue_init(dst_vq);
1935 static const struct v4l2_ctrl_config vpe_bufs_per_job = {
1936 .ops = &vpe_ctrl_ops,
1937 .id = V4L2_CID_VPE_BUFS_PER_JOB,
1938 .name = "Buffers Per Transaction",
1939 .type = V4L2_CTRL_TYPE_INTEGER,
1940 .def = VPE_DEF_BUFS_PER_JOB,
1942 .max = VIDEO_MAX_FRAME,
1949 static int vpe_open(struct file *file)
1951 struct vpe_dev *dev = video_drvdata(file);
1952 struct vpe_q_data *s_q_data;
1953 struct v4l2_ctrl_handler *hdl;
1954 struct vpe_ctx *ctx;
1957 vpe_dbg(dev, "vpe_open\n");
1959 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1965 if (mutex_lock_interruptible(&dev->dev_mutex)) {
1970 ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
1971 VPDMA_LIST_TYPE_NORMAL);
1975 ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
1977 goto free_desc_list;
1979 ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
1983 ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
1989 v4l2_fh_init(&ctx->fh, video_devdata(file));
1990 file->private_data = &ctx->fh;
1993 v4l2_ctrl_handler_init(hdl, 1);
1994 v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
1999 ctx->fh.ctrl_handler = hdl;
2000 v4l2_ctrl_handler_setup(hdl);
2002 s_q_data = &ctx->q_data[Q_DATA_SRC];
2003 s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
2004 s_q_data->width = 1920;
2005 s_q_data->height = 1080;
2006 s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
2007 s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
2008 s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
2010 s_q_data->colorspace = V4L2_COLORSPACE_REC709;
2011 s_q_data->field = V4L2_FIELD_NONE;
2012 s_q_data->c_rect.left = 0;
2013 s_q_data->c_rect.top = 0;
2014 s_q_data->c_rect.width = s_q_data->width;
2015 s_q_data->c_rect.height = s_q_data->height;
2016 s_q_data->flags = 0;
2018 ctx->q_data[Q_DATA_DST] = *s_q_data;
2020 set_dei_shadow_registers(ctx);
2021 set_src_registers(ctx);
2022 set_dst_registers(ctx);
2023 ret = set_srcdst_params(ctx);
2027 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
2029 if (IS_ERR(ctx->fh.m2m_ctx)) {
2030 ret = PTR_ERR(ctx->fh.m2m_ctx);
2034 v4l2_fh_add(&ctx->fh);
2037 * for now, just report the creation of the first instance; we can later
2038 * optimize the driver to enable or disable clocks when the first
2039 * instance is created or the last instance is released
2041 if (atomic_inc_return(&dev->num_instances) == 1)
2042 vpe_dbg(dev, "first instance created\n");
2044 ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;
2046 ctx->load_mmrs = true;
2048 vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
2049 ctx, ctx->fh.m2m_ctx);
2051 mutex_unlock(&dev->dev_mutex);
2055 v4l2_ctrl_handler_free(hdl);
2056 v4l2_fh_exit(&ctx->fh);
2057 vpdma_free_desc_buf(&ctx->sc_coeff_v);
2059 vpdma_free_desc_buf(&ctx->sc_coeff_h);
2061 vpdma_free_desc_buf(&ctx->mmr_adb);
2063 vpdma_free_desc_list(&ctx->desc_list);
2065 mutex_unlock(&dev->dev_mutex);
2071 static int vpe_release(struct file *file)
2073 struct vpe_dev *dev = video_drvdata(file);
2074 struct vpe_ctx *ctx = file2ctx(file);
2076 vpe_dbg(dev, "releasing instance %p\n", ctx);
2078 mutex_lock(&dev->dev_mutex);
2080 free_mv_buffers(ctx);
2081 vpdma_free_desc_list(&ctx->desc_list);
2082 vpdma_free_desc_buf(&ctx->mmr_adb);
2084 v4l2_fh_del(&ctx->fh);
2085 v4l2_fh_exit(&ctx->fh);
2086 v4l2_ctrl_handler_free(&ctx->hdl);
2087 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
2092 * for now, just report the release of the last instance; we can later
2093 * optimize the driver to enable or disable clocks when the first
2094 * instance is created or the last instance is released
2096 if (atomic_dec_return(&dev->num_instances) == 0)
2097 vpe_dbg(dev, "last instance released\n");
2099 mutex_unlock(&dev->dev_mutex);
2104 static const struct v4l2_file_operations vpe_fops = {
2105 .owner = THIS_MODULE,
2107 .release = vpe_release,
2108 .poll = v4l2_m2m_fop_poll,
2109 .unlocked_ioctl = video_ioctl2,
2110 .mmap = v4l2_m2m_fop_mmap,
2113 static struct video_device vpe_videodev = {
2114 .name = VPE_MODULE_NAME,
2116 .ioctl_ops = &vpe_ioctl_ops,
2118 .release = video_device_release_empty,
2119 .vfl_dir = VFL_DIR_M2M,
2122 static struct v4l2_m2m_ops m2m_ops = {
2123 .device_run = device_run,
2124 .job_ready = job_ready,
2125 .job_abort = job_abort,
2127 .unlock = vpe_unlock,
2130 static int vpe_runtime_get(struct platform_device *pdev)
2134 dev_dbg(&pdev->dev, "vpe_runtime_get\n");
2136 r = pm_runtime_get_sync(&pdev->dev);
2139 pm_runtime_put_noidle(&pdev->dev);
2140 return r < 0 ? r : 0;
2143 static void vpe_runtime_put(struct platform_device *pdev)
2148 dev_dbg(&pdev->dev, "vpe_runtime_put\n");
2150 r = pm_runtime_put_sync(&pdev->dev);
2151 WARN_ON(r < 0 && r != -ENOSYS);
2154 static void vpe_fw_cb(struct platform_device *pdev)
2156 struct vpe_dev *dev = platform_get_drvdata(pdev);
2157 struct video_device *vfd;
2161 *vfd = vpe_videodev;
2162 vfd->lock = &dev->dev_mutex;
2163 vfd->v4l2_dev = &dev->v4l2_dev;
2165 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
2167 vpe_err(dev, "Failed to register video device\n");
2169 vpe_set_clock_enable(dev, 0);
2170 vpe_runtime_put(pdev);
2171 pm_runtime_disable(&pdev->dev);
2172 v4l2_m2m_release(dev->m2m_dev);
2173 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
2174 v4l2_device_unregister(&dev->v4l2_dev);
2179 video_set_drvdata(vfd, dev);
2180 snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
2181 dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
2185 static int vpe_probe(struct platform_device *pdev)
2187 struct vpe_dev *dev;
2190 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
2194 spin_lock_init(&dev->lock);
2196 ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
2200 atomic_set(&dev->num_instances, 0);
2201 mutex_init(&dev->dev_mutex);
2203 dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2206 * HACK: we get resource info from the device tree in the form of a list of
2207 * VPE sub blocks. The driver currently uses only the base of vpe_top
2208 * for register access; it should be changed later to access
2209 * registers based on the sub block base addresses
2211 dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
2214 goto v4l2_dev_unreg;
2217 irq = platform_get_irq(pdev, 0);
2218 ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
2221 goto v4l2_dev_unreg;
2223 platform_set_drvdata(pdev, dev);
2225 dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
2226 if (IS_ERR(dev->alloc_ctx)) {
2227 vpe_err(dev, "Failed to alloc vb2 context\n");
2228 ret = PTR_ERR(dev->alloc_ctx);
2229 goto v4l2_dev_unreg;
2232 dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
2233 if (IS_ERR(dev->m2m_dev)) {
2234 vpe_err(dev, "Failed to init mem2mem device\n");
2235 ret = PTR_ERR(dev->m2m_dev);
2239 pm_runtime_enable(&pdev->dev);
2241 ret = vpe_runtime_get(pdev);
2245 /* Perform clk enable followed by reset */
2246 vpe_set_clock_enable(dev, 1);
2250 func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
2251 VPE_PID_FUNC_SHIFT);
2252 vpe_dbg(dev, "VPE PID function %x\n", func);
2254 vpe_top_vpdma_reset(dev);
2256 dev->sc = sc_create(pdev);
2257 if (IS_ERR(dev->sc)) {
2258 ret = PTR_ERR(dev->sc);
2262 dev->csc = csc_create(pdev);
2263 if (IS_ERR(dev->csc)) {
2264 ret = PTR_ERR(dev->csc);
2268 dev->vpdma = vpdma_create(pdev, vpe_fw_cb);
2269 if (IS_ERR(dev->vpdma)) {
2270 ret = PTR_ERR(dev->vpdma);
2277 vpe_runtime_put(pdev);
2279 pm_runtime_disable(&pdev->dev);
2280 v4l2_m2m_release(dev->m2m_dev);
2282 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
2284 v4l2_device_unregister(&dev->v4l2_dev);
2289 static int vpe_remove(struct platform_device *pdev)
2291 struct vpe_dev *dev = platform_get_drvdata(pdev);
2293 v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);
2295 v4l2_m2m_release(dev->m2m_dev);
2296 video_unregister_device(&dev->vfd);
2297 v4l2_device_unregister(&dev->v4l2_dev);
2298 vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
2300 vpe_set_clock_enable(dev, 0);
2301 vpe_runtime_put(pdev);
2302 pm_runtime_disable(&pdev->dev);
2307 #if defined(CONFIG_OF)
2308 static const struct of_device_id vpe_of_match[] = {
2310 .compatible = "ti,vpe",
2316 static struct platform_driver vpe_pdrv = {
2318 .remove = vpe_remove,
2320 .name = VPE_MODULE_NAME,
2321 .of_match_table = of_match_ptr(vpe_of_match),
2325 module_platform_driver(vpe_pdrv);
2327 MODULE_DESCRIPTION("TI VPE driver");
2328 MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
2329 MODULE_LICENSE("GPL");