// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Intel Corporation
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <sakari.ailus@linux.intel.com>
 *  Samu Onkalo <samu.onkalo@intel.com>
 *  Jouni Högander <jouni.hogander@intel.com>
 *  Jouni Ukkonen <jouni.ukkonen@intel.com>
 *  Antti Laakso <antti.laakso@intel.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>

#include "ipu3-cio2.h"
struct ipu3_cio2_fmt {
        u32 mbus_code;
        u32 fourcc;
        u8 mipicode;
};
/*
 * These are raw formats used in Intel's third generation of
 * Image Processing Unit known as IPU3.
 * 10-bit raw Bayer packed: 32 bytes for every 25 pixels,
 * with the last 6 (LSB) bits unused.
 */
static const struct ipu3_cio2_fmt formats[] = {
        {       /* put default entry at beginning */
                .mbus_code      = MEDIA_BUS_FMT_SGRBG10_1X10,
                .fourcc         = V4L2_PIX_FMT_IPU3_SGRBG10,
                .mipicode       = 0x2b,         /* MIPI CSI-2 RAW10 */
        }, {
                .mbus_code      = MEDIA_BUS_FMT_SGBRG10_1X10,
                .fourcc         = V4L2_PIX_FMT_IPU3_SGBRG10,
                .mipicode       = 0x2b,
        }, {
                .mbus_code      = MEDIA_BUS_FMT_SBGGR10_1X10,
                .fourcc         = V4L2_PIX_FMT_IPU3_SBGGR10,
                .mipicode       = 0x2b,
        }, {
                .mbus_code      = MEDIA_BUS_FMT_SRGGB10_1X10,
                .fourcc         = V4L2_PIX_FMT_IPU3_SRGGB10,
                .mipicode       = 0x2b,
        },
};
/*
 * cio2_find_format - lookup color format by fourcc and/or media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
                                                    const u32 *mbus_code)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(formats); i++) {
                if (pixelformat && *pixelformat != formats[i].fourcc)
                        continue;
                if (mbus_code && *mbus_code != formats[i].mbus_code)
                        continue;

                return &formats[i];
        }

        return NULL;
}
static inline u32 cio2_bytesperline(const unsigned int width)
{
        /*
         * 64 bytes for every 50 pixels, the line length
         * in bytes is a multiple of 64 (line end alignment).
         */
        return DIV_ROUND_UP(width, 50) * 64;
}
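
/*
 * Worked example (added note, not part of the original driver): for the
 * driver's default width of 1936 pixels, DIV_ROUND_UP(1936, 50) = 39
 * groups of 50 pixels, each stored in 64 bytes, so the resulting
 * bytesperline is 39 * 64 = 2496.
 */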
/**************** FBPT operations ****************/
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
        if (cio2->dummy_lop) {
                dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
                                  cio2->dummy_lop, cio2->dummy_lop_bus_addr);
                cio2->dummy_lop = NULL;
        }
        if (cio2->dummy_page) {
                dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
                                  cio2->dummy_page, cio2->dummy_page_bus_addr);
                cio2->dummy_page = NULL;
        }
}
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
        unsigned int i;

        cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
                                              CIO2_PAGE_SIZE,
                                              &cio2->dummy_page_bus_addr,
                                              GFP_KERNEL);
        cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
                                             CIO2_PAGE_SIZE,
                                             &cio2->dummy_lop_bus_addr,
                                             GFP_KERNEL);
        if (!cio2->dummy_page || !cio2->dummy_lop) {
                cio2_fbpt_exit_dummy(cio2);
                return -ENOMEM;
        }
        /*
         * A List of Pointers (LOP) contains 1024 32-bit pointers, each
         * referring to a 4 KiB page. Initialize every entry to the
         * dummy_page bus base address.
         */
        for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
                cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;

        return 0;
}
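
/*
 * Note (added for clarity): the hardware consumes page-frame numbers
 * rather than full bus addresses, which is why LOP and FBPT entries
 * throughout this file store bus_addr >> PAGE_SHIFT.
 */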
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
                                   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
        /*
         * The CPU first initializes some fields in fbpt, then sets
         * the VALID bit. This barrier ensures that the DMA (device)
         * does not see the VALID bit set before the other fields are
         * initialized; otherwise it could lead to havoc.
         */
        dma_wmb();

        /*
         * Request interrupts for start and completion.
         * The Valid bit is applicable only to the 1st entry.
         */
        entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
                CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}
/* Initialize fbpt entries to point to the dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
                                       struct cio2_fbpt_entry
                                       entry[CIO2_MAX_LOPS])
{
        unsigned int i;

        entry[0].first_entry.first_page_offset = 0;
        entry[1].second_entry.num_of_pages =
                CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
        entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;

        for (i = 0; i < CIO2_MAX_LOPS; i++)
                entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

        cio2_fbpt_entry_enable(cio2, entry);
}
/* Initialize fbpt entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
                                     struct cio2_buffer *b,
                                     struct cio2_fbpt_entry
                                     entry[CIO2_MAX_LOPS])
{
        struct vb2_buffer *vb = &b->vbb.vb2_buf;
        unsigned int length = vb->planes[0].length;
        int remaining, i;

        entry[0].first_entry.first_page_offset = b->offset;
        remaining = length + entry[0].first_entry.first_page_offset;
        entry[1].second_entry.num_of_pages =
                DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
        /*
         * last_page_available_bytes has the offset of the last byte in the
         * last page which is still accessible by DMA. DMA cannot access
         * beyond this point. Valid range for this is from 0 to 4095.
         * 0 indicates 1st byte in the page is DMA accessible.
         * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
         * is available for DMA transfer.
         */
        entry[1].second_entry.last_page_available_bytes =
                        (remaining & ~PAGE_MASK) ?
                                (remaining & ~PAGE_MASK) - 1 :
                                CIO2_PAGE_SIZE - 1;
        /* Fill FBPT */
        remaining = length;
        i = 0;
        while (remaining > 0) {
                entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
                remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
                entry++;
                i++;
        }

        /*
         * The first not meaningful FBPT entry should point to a valid LOP
         */
        entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

        cio2_fbpt_entry_enable(cio2, entry);
}
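
/*
 * Worked example (added note, not in the original): with b->offset = 0
 * and a plane length of 10000 bytes, remaining = 10000, num_of_pages =
 * DIV_ROUND_UP(10000, 4096) = 3, and remaining & ~PAGE_MASK = 10000 -
 * 2 * 4096 = 1808, so last_page_available_bytes = 1807: the DMA may
 * touch bytes 0..1807 of the third and last page.
 */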
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
        struct device *dev = &cio2->pci_dev->dev;

        q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
                                     GFP_KERNEL);
        if (!q->fbpt)
                return -ENOMEM;

        memset(q->fbpt, 0, CIO2_FBPT_SIZE);

        return 0;
}

static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
        dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}
/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *     UI = 1 / (2 * F) in seconds
 *     COUNT_ACC = counter accuracy in seconds
 *     For IPU3 COUNT_ACC = 0.0625
 *
 * A and B are coefficients from the table below,
 * depending on whether the register minimum or maximum value is
 * calculated.
 *                                     Minimum     Maximum
 * Clock lane                          A     B     A     B
 * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
 * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
 * Data lanes
 * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
 *
 * We use the minimum values of both A and B.
 */

/*
 * shift for keeping the value range suitable for 32-bit integer arithmetic
 */
#define LIMIT_SHIFT	8
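
/*
 * Worked example (added note, not in the original): for a hypothetical
 * 456 MHz link frequency, clk_settle uses A = 95, B = -8, and
 * cio2_rx_timing() below computes:
 *     freq >> 8                   = 1781250
 *     accinv * B * (uiinv >> 8)   = 16 * -8 * 1953125 = -250000000
 *     -250000000 / 1781250        = -140 (integer division)
 *     -140 + accinv * A           = -140 + 16 * 95 = 1380
 */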
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
        const u32 accinv = 16; /* inverse of the counter resolution */
        const u32 uiinv = 500000000; /* 1e9 / 2 */
        s64 r;

        freq >>= LIMIT_SHIFT;

        if (WARN_ON(freq <= 0 || freq > S32_MAX))
                return def;
        /*
         * b could be 0, -2 or -8, so |accinv * b| is always
         * less than (1 << ds) and thus |r| < 500000000.
         */
        r = accinv * b * (uiinv >> LIMIT_SHIFT);
        r = r / (s32)freq;
        /* max value of a is 95 */
        r += accinv * a;

        return r;
}
/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
                                 struct cio2_csi2_timing *timing)
{
        struct device *dev = &cio2->pci_dev->dev;
        struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
        struct v4l2_ctrl *link_freq;
        s64 freq;
        int r;

        if (!q->sensor)
                return -ENODEV;

        link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
        if (!link_freq) {
                dev_err(dev, "failed to find LINK_FREQ\n");
                return -EPIPE;
        }

        qm.index = v4l2_ctrl_g_ctrl(link_freq);
        r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
        if (r) {
                dev_err(dev, "failed to get menu item\n");
                return r;
        }

        if (!qm.value) {
                dev_err(dev, "error invalid link_freq\n");
                return -EINVAL;
        }
        freq = qm.value;

        timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
                                            CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
                                            freq,
                                            CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
        timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
                                            CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
                                            freq,
                                            CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
        timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
                                            CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
                                            freq,
                                            CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
        timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
                                            CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
                                            freq,
                                            CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

        dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
        dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
        dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
        dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

        return 0;
}
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
        static const int NUM_VCS = 4;
        static const int SID;   /* Stream id */
        static const int ENTRY;
        static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
                                        CIO2_FBPT_SUBENTRY_UNIT);
        const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
        const struct ipu3_cio2_fmt *fmt;
        void __iomem *const base = cio2->base;
        u8 lanes, csi2bus = q->csi2.port;
        u8 sensor_vc = SENSOR_VIR_CH_DFLT;
        struct cio2_csi2_timing timing = { 0 };
        int i, r;

        fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
        if (!fmt)
                return -EINVAL;

        lanes = q->csi2.lanes;
        r = cio2_csi2_calc_timing(cio2, q, &timing);
        if (r)
                return r;

        writel(timing.clk_termen, q->csi_rx_base +
                CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
        writel(timing.clk_settle, q->csi_rx_base +
                CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

        for (i = 0; i < lanes; i++) {
                writel(timing.dat_termen, q->csi_rx_base +
                        CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
                writel(timing.dat_settle, q->csi_rx_base +
                        CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
        }
        writel(CIO2_PBM_WMCTRL1_MIN_2CK |
               CIO2_PBM_WMCTRL1_MID1_2CK |
               CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
        writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
               CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
               CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
               CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
               CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
               CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
        writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
               CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
               CIO2_PBM_ARB_CTRL_LE_EN |
               CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
               CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
               CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
               CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
               base + CIO2_REG_PBM_ARB_CTRL);
        writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
               q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
        writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
               q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

        writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
        writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

        /* Configure MIPI backend */
        for (i = 0; i < NUM_VCS; i++)
                writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

        /* There are 16 short packet LUT entries */
        for (i = 0; i < 16; i++)
                writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
                       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
        writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
               q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
        writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
        writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
        writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
        writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
        writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
        writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

        writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
               CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
               base + CIO2_REG_INT_EN);

        writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
               << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
               base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
        writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
               sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
               fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
               q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
        writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
        writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
        writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

        writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
        writel(CIO2_CGC_PRIM_TGE |
               CIO2_CGC_SIDE_TGE |
               CIO2_CGC_XOSC_TGE |
               CIO2_CGC_D3I3_TGE |
               CIO2_CGC_CSI2_INTERFRAME_TGE |
               CIO2_CGC_CSI2_PORT_DCGE |
               CIO2_CGC_SIDE_DCGE |
               CIO2_CGC_PRIM_DCGE |
               CIO2_CGC_ROSC_DCGE |
               CIO2_CGC_XOSC_DCGE |
               CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
               CIO2_CGC_CSI_CLKGATE_HOLDOFF
               << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
        writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
        writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
               CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
               CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
               CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
               base + CIO2_REG_LTRVAL01);
        writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
               CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
               CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
               CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
               base + CIO2_REG_LTRVAL23);
        for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
                writel(0, base + CIO2_REG_CDMABA(i));
                writel(0, base + CIO2_REG_CDMAC0(i));
                writel(0, base + CIO2_REG_CDMAC1(i));
        }

        /* Enable DMA */
        writel(q->fbpt_bus_addr >> PAGE_SHIFT,
               base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

        writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
               FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
               CIO2_CDMAC0_DMA_INTR_ON_FE |
               CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
               CIO2_CDMAC0_DMA_EN |
               CIO2_CDMAC0_DMA_INTR_ON_FS |
               CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

        writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
               base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

        writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

        writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
               CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
               CIO2_PXM_FRF_CFG_MSK_ECC_RE |
               CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
               base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

        /* Clear interrupts */
        writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
        writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
        writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
        writel(~0, base + CIO2_REG_INT_STS);

        /* Enable devices, starting from the last device in the pipe */
        writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
        writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

        return 0;
}
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
        void __iomem *base = cio2->base;
        unsigned int i, maxloops = 1000;

        /* Disable CSI receiver and MIPI backend devices */
        writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
        writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
        writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
        writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

        /* Halt DMA */
        writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
        do {
                if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
                    CIO2_CDMAC0_DMA_HALTED)
                        break;
                usleep_range(1000, 2000);
        } while (--maxloops);
        if (!maxloops)
                dev_err(&cio2->pci_dev->dev,
                        "DMA %i cannot be halted\n", CIO2_DMA_CHAN);

        for (i = 0; i < CIO2_NUM_PORTS; i++) {
                writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
                       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
                writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
                       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
        }
}
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
        struct device *dev = &cio2->pci_dev->dev;
        struct cio2_queue *q = cio2->cur_queue;
        int buffers_found = 0;
        u64 ns = ktime_get_ns();

        if (dma_chan >= CIO2_QUEUES) {
                dev_err(dev, "bad DMA channel %i\n", dma_chan);
                return;
        }

        /* Find out which buffer(s) are ready */
        do {
                struct cio2_fbpt_entry *const entry =
                        &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
                struct cio2_buffer *b;

                if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
                        break;

                b = q->bufs[q->bufs_first];
                if (b) {
                        unsigned int bytes = entry[1].second_entry.num_of_bytes;

                        q->bufs[q->bufs_first] = NULL;
                        atomic_dec(&q->bufs_queued);
                        dev_dbg(&cio2->pci_dev->dev,
                                "buffer %i done\n", b->vbb.vb2_buf.index);

                        b->vbb.vb2_buf.timestamp = ns;
                        b->vbb.field = V4L2_FIELD_NONE;
                        b->vbb.sequence = atomic_read(&q->frame_sequence);
                        if (b->vbb.vb2_buf.planes[0].length != bytes)
                                dev_warn(dev, "buffer length is %d received %d\n",
                                         b->vbb.vb2_buf.planes[0].length,
                                         bytes);
                        vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
                }
                atomic_inc(&q->frame_sequence);
                cio2_fbpt_entry_init_dummy(cio2, entry);
                q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
                buffers_found++;
        } while (1);

        if (buffers_found == 0)
                dev_warn(&cio2->pci_dev->dev,
                         "no ready buffers found on DMA channel %u\n",
                         dma_chan);
}
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
        /*
         * For the user space camera control algorithms it is essential
         * to know when the reception of a frame has begun. That's often
         * the best timing information to get from the hardware.
         */
        struct v4l2_event event = {
                .type = V4L2_EVENT_FRAME_SYNC,
                .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
        };

        v4l2_event_queue(q->subdev.devnode, &event);
}
static const char *const cio2_irq_errs[] = {
        "single packet header error corrected",
        "multiple packet header errors detected",
        "payload checksum (CRC) error",
        "fifo overflow",
        "reserved short packet data type detected",
        "reserved long packet data type detected",
        "incomplete long packet detected",
        "frame sync error",
        "line sync error",
        "DPHY start of transmission error",
        "DPHY synchronization error",
        "escape mode error",
        "escape mode trigger event",
        "escape mode ultra-low power state for data lane(s)",
        "escape mode ultra-low power state exit for clock lane",
        "inter-frame short packet discarded",
        "inter-frame long packet discarded",
        "non-matching Long Packet stalled",
};

static const char *const cio2_port_errs[] = {
        "ECC recoverable",
        "DPHY not recoverable",
        "ECC not recoverable",
        "CRC error",
        "INTERFRAMEDATA",
        "PKT2SHORT",
        "PKT2LONG",
};
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
        void __iomem *const base = cio2->base;
        struct device *dev = &cio2->pci_dev->dev;

        if (int_status & CIO2_INT_IOOE) {
                /*
                 * Interrupt on Output Error:
                 * 1) SRAM is full and FS received, or
                 * 2) An invalid bit detected by DMA.
                 */
                u32 oe_status, oe_clear;

                oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
                oe_status = oe_clear;

                if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
                        dev_err(dev, "DMA output error: 0x%x\n",
                                (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
                                >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
                        oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
                }
                if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
                        dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
                                (oe_status & CIO2_INT_EXT_OE_OES_MASK)
                                >> CIO2_INT_EXT_OE_OES_SHIFT);
                        oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
                }
                writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
                if (oe_status)
                        dev_warn(dev, "unknown interrupt 0x%x on OE\n",
                                 oe_status);
                int_status &= ~CIO2_INT_IOOE;
        }

        if (int_status & CIO2_INT_IOC_MASK) {
                /* DMA IO done -- frame ready */
                u32 clr = 0;
                unsigned int d;

                for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
                        if (int_status & CIO2_INT_IOC(d)) {
                                clr |= CIO2_INT_IOC(d);
                                cio2_buffer_done(cio2, d);
                        }
                int_status &= ~clr;
        }

        if (int_status & CIO2_INT_IOS_IOLN_MASK) {
                /* DMA IO starts or reached specified line */
                u32 clr = 0;
                unsigned int d;

                for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
                        if (int_status & CIO2_INT_IOS_IOLN(d)) {
                                clr |= CIO2_INT_IOS_IOLN(d);
                                if (d == CIO2_DMA_CHAN)
                                        cio2_queue_event_sof(cio2,
                                                             cio2->cur_queue);
                        }
                int_status &= ~clr;
        }

        if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
                /* CSI2 receiver (error) interrupt */
                u32 ie_status, ie_clear;
                unsigned int port;

                ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
                ie_status = ie_clear;

                for (port = 0; port < CIO2_NUM_PORTS; port++) {
                        u32 port_status = (ie_status >> (port * 8)) & 0xff;
                        u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
                        void __iomem *const csi_rx_base =
                                                base + CIO2_REG_PIPE_BASE(port);
                        unsigned int i;

                        while (port_status & err_mask) {
                                i = ffs(port_status) - 1;
                                dev_err(dev, "port %i error %s\n",
                                        port, cio2_port_errs[i]);
                                ie_status &= ~BIT(port * 8 + i);
                                port_status &= ~BIT(i);
                        }

                        if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
                                u32 csi2_status, csi2_clear;

                                csi2_status = readl(csi_rx_base +
                                                CIO2_REG_IRQCTRL_STATUS);
                                csi2_clear = csi2_status;
                                err_mask =
                                        BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;

                                while (csi2_status & err_mask) {
                                        i = ffs(csi2_status) - 1;
                                        dev_err(dev,
                                                "CSI-2 receiver port %i: %s\n",
                                                port, cio2_irq_errs[i]);
                                        csi2_status &= ~BIT(i);
                                }

                                writel(csi2_clear,
                                       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
                                if (csi2_status)
                                        dev_warn(dev,
                                                 "unknown CSI2 error 0x%x on port %i\n",
                                                 csi2_status, port);

                                ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
                        }
                }

                writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
                if (ie_status)
                        dev_warn(dev, "unknown interrupt 0x%x on IE\n",
                                 ie_status);

                int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
        }

        if (int_status)
                dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
        struct cio2_device *cio2 = cio2_ptr;
        void __iomem *const base = cio2->base;
        struct device *dev = &cio2->pci_dev->dev;
        u32 int_status;

        int_status = readl(base + CIO2_REG_INT_STS);
        dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
        if (!int_status)
                return IRQ_NONE;

        do {
                writel(int_status, base + CIO2_REG_INT_STS);
                cio2_irq_handle_once(cio2, int_status);
                int_status = readl(base + CIO2_REG_INT_STS);
                if (int_status)
                        dev_dbg(dev, "pending status 0x%x\n", int_status);
        } while (int_status);

        return IRQ_HANDLED;
}
/**************** Videobuf2 interface ****************/

static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
                                        enum vb2_buffer_state state)
{
        unsigned int i;

        for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
                if (q->bufs[i]) {
                        atomic_dec(&q->bufs_queued);
                        vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
                                        state);
                        q->bufs[i] = NULL;
                }
        }
}

static int cio2_vb2_queue_setup(struct vb2_queue *vq,
                                unsigned int *num_buffers,
                                unsigned int *num_planes,
                                unsigned int sizes[],
                                struct device *alloc_devs[])
{
        struct cio2_device *cio2 = vb2_get_drv_priv(vq);
        struct cio2_queue *q = vb2q_to_cio2_queue(vq);
        unsigned int i;

        *num_planes = q->format.num_planes;

        for (i = 0; i < *num_planes; ++i) {
                sizes[i] = q->format.plane_fmt[i].sizeimage;
                alloc_devs[i] = &cio2->pci_dev->dev;
        }

        *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);

        /* Initialize buffer queue */
        for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
                q->bufs[i] = NULL;
                cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
        }
        atomic_set(&q->bufs_queued, 0);
        q->bufs_first = 0;
        q->bufs_next = 0;

        return 0;
}
/* Called after each buffer is allocated */
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
        struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
        struct device *dev = &cio2->pci_dev->dev;
        struct cio2_buffer *b =
                container_of(vb, struct cio2_buffer, vbb.vb2_buf);
        static const unsigned int entries_per_page =
                CIO2_PAGE_SIZE / sizeof(u32);
        unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
        unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
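        /*
         * Note (added for clarity): "pages + 1" reserves room for one
         * extra LOP entry pointing at the dummy page, which terminates
         * the list for the DMA engine (see the b->lop[i][j] assignment
         * at the end of this function).
         */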
        struct sg_table *sg;
        struct sg_page_iter sg_iter;
        int i, j;

        if (lops <= 0 || lops > CIO2_MAX_LOPS) {
                dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
                        vb->planes[0].length);
                return -ENOSPC;         /* Should never happen */
        }

        memset(b->lop, 0, sizeof(b->lop));
        /* Allocate LOP table */
        for (i = 0; i < lops; i++) {
                b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
                                               &b->lop_bus_addr[i], GFP_KERNEL);
                if (!b->lop[i])
                        goto fail;
        }

        /* Fill LOP */
        sg = vb2_dma_sg_plane_desc(vb, 0);
        if (!sg)
                return -ENOMEM;

        if (sg->nents && sg->sgl)
                b->offset = sg->sgl->offset;

        i = j = 0;
        for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) {
                if (!pages--)
                        break;
                b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
                j++;
                if (j == entries_per_page) {
                        i++;
                        j = 0;
                }
        }

        b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
        return 0;
fail:
        for (i--; i >= 0; i--)
                dma_free_coherent(dev, CIO2_PAGE_SIZE,
                                  b->lop[i], b->lop_bus_addr[i]);
        return -ENOMEM;
}
/* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
        struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
        struct cio2_queue *q =
                container_of(vb->vb2_queue, struct cio2_queue, vbq);
        struct cio2_buffer *b =
                container_of(vb, struct cio2_buffer, vbb.vb2_buf);
        struct cio2_fbpt_entry *entry;
        unsigned long flags;
        unsigned int i, j, next = q->bufs_next;
        int bufs_queued = atomic_inc_return(&q->bufs_queued);
        u32 fbpt_rp;

        dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);

        /*
         * This code queues the buffer to the CIO2 DMA engine, which starts
         * running once streaming has started. It is possible that this code
         * gets pre-empted due to increased CPU load. Upon this, the driver
         * does not get an opportunity to queue new buffers to the CIO2 DMA
         * engine. When the DMA engine encounters an FBPT entry without the
         * VALID bit set, the DMA engine halts, which requires a restart of
         * the DMA engine and sensor, to continue streaming.
         * This is not desired and is highly unlikely, given that the DMA
         * engine would need to process 32 FBPT entries before running into
         * one without the VALID bit set. We try to mitigate this by
         * disabling interrupts for the duration of this queueing.
         */
        local_irq_save(flags);

        fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
                   >> CIO2_CDMARI_FBPT_RP_SHIFT)
                   & CIO2_CDMARI_FBPT_RP_MASK;

        /*
         * fbpt_rp is the fbpt entry that the dma is currently working
         * on, but since it could jump to next entry at any time,
         * assume that we might already be there.
         */
        fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

        if (bufs_queued <= 1 || fbpt_rp == next)
                /* Buffers were drained */
                next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

        for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
                /*
                 * We have allocated CIO2_MAX_BUFFERS circularly for the
                 * hw, the user has requested N buffer queue. The driver
                 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
                 * user queues a buffer, there necessarily is a free buffer.
                 */
                if (!q->bufs[next]) {
                        q->bufs[next] = b;
                        entry = &q->fbpt[next * CIO2_MAX_LOPS];
                        cio2_fbpt_entry_init_buf(cio2, b, entry);
                        local_irq_restore(flags);
                        q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
                        for (j = 0; j < vb->num_planes; j++)
                                vb2_set_plane_payload(vb, j,
                                        q->format.plane_fmt[j].sizeimage);
                        return;
                }

                dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
                next = (next + 1) % CIO2_MAX_BUFFERS;
        }

        local_irq_restore(flags);
        dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
        atomic_dec(&q->bufs_queued);
        vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
        struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
        struct cio2_buffer *b =
                container_of(vb, struct cio2_buffer, vbb.vb2_buf);
        unsigned int i;

        /* Free LOP table */
        for (i = 0; i < CIO2_MAX_LOPS; i++) {
                if (b->lop[i])
                        dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
                                          b->lop[i], b->lop_bus_addr[i]);
        }
}
static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct cio2_queue *q = vb2q_to_cio2_queue(vq);
        struct cio2_device *cio2 = vb2_get_drv_priv(vq);
        int r;

        cio2->cur_queue = q;
        atomic_set(&q->frame_sequence, 0);

        r = pm_runtime_get_sync(&cio2->pci_dev->dev);
        if (r < 0) {
                dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
                pm_runtime_put_noidle(&cio2->pci_dev->dev);
                return r;
        }

        r = media_pipeline_start(&q->vdev.entity, &q->pipe);
        if (r)
                goto fail_pipeline;

        r = cio2_hw_init(cio2, q);
        if (r)
                goto fail_hw;

        /* Start streaming on sensor */
        r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
        if (r)
                goto fail_csi2_subdev;

        cio2->streaming = true;

        return 0;

fail_csi2_subdev:
        cio2_hw_exit(cio2, q);
fail_hw:
        media_pipeline_stop(&q->vdev.entity);
fail_pipeline:
        dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
        cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
        pm_runtime_put(&cio2->pci_dev->dev);

        return r;
}

static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
        struct cio2_queue *q = vb2q_to_cio2_queue(vq);
        struct cio2_device *cio2 = vb2_get_drv_priv(vq);

        if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
                dev_err(&cio2->pci_dev->dev,
                        "failed to stop sensor streaming\n");

        cio2_hw_exit(cio2, q);
        synchronize_irq(cio2->pci_dev->irq);
        cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
        media_pipeline_stop(&q->vdev.entity);
        pm_runtime_put(&cio2->pci_dev->dev);
        cio2->streaming = false;
}
static const struct vb2_ops cio2_vb2_ops = {
        .buf_init = cio2_vb2_buf_init,
        .buf_queue = cio2_vb2_buf_queue,
        .buf_cleanup = cio2_vb2_buf_cleanup,
        .queue_setup = cio2_vb2_queue_setup,
        .start_streaming = cio2_vb2_start_streaming,
        .stop_streaming = cio2_vb2_stop_streaming,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
};
/**************** V4L2 interface ****************/

static int cio2_v4l2_querycap(struct file *file, void *fh,
                              struct v4l2_capability *cap)
{
        struct cio2_device *cio2 = video_drvdata(file);

        strlcpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
        strlcpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info),
                 "PCI:%s", pci_name(cio2->pci_dev));

        return 0;
}

static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
                              struct v4l2_fmtdesc *f)
{
        if (f->index >= ARRAY_SIZE(formats))
                return -EINVAL;

        f->pixelformat = formats[f->index].fourcc;

        return 0;
}

/* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
        struct cio2_queue *q = file_to_cio2_queue(file);

        f->fmt.pix_mp = q->format;

        return 0;
}
static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
        const struct ipu3_cio2_fmt *fmt;
        struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;

        fmt = cio2_find_format(&mpix->pixelformat, NULL);
        if (!fmt)
                fmt = &formats[0];

        /* Only supports up to 4224x3136 */
        if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
                mpix->width = CIO2_IMAGE_MAX_WIDTH;
        if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
                mpix->height = CIO2_IMAGE_MAX_LENGTH;

        mpix->num_planes = 1;
        mpix->pixelformat = fmt->fourcc;
        mpix->colorspace = V4L2_COLORSPACE_RAW;
        mpix->field = V4L2_FIELD_NONE;
        memset(mpix->reserved, 0, sizeof(mpix->reserved));
        mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
        mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
                                                        mpix->height;
        memset(mpix->plane_fmt[0].reserved, 0,
               sizeof(mpix->plane_fmt[0].reserved));

        /* use default */
        mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
        mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
        mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;

        return 0;
}

static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
        struct cio2_queue *q = file_to_cio2_queue(file);

        cio2_v4l2_try_fmt(file, fh, f);
        q->format = f->fmt.pix_mp;

        return 0;
}
static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
        if (input->index > 0)
                return -EINVAL;

        strlcpy(input->name, "camera", sizeof(input->name));
        input->type = V4L2_INPUT_TYPE_CAMERA;

        return 0;
}

static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
        *input = 0;

        return 0;
}

static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{
        return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_file_operations cio2_v4l2_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open = v4l2_fh_open,
        .release = vb2_fop_release,
        .poll = vb2_fop_poll,
        .mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
        .vidioc_querycap = cio2_v4l2_querycap,
        .vidioc_enum_fmt_vid_cap_mplane = cio2_v4l2_enum_fmt,
        .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
        .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
        .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
        .vidioc_reqbufs = vb2_ioctl_reqbufs,
        .vidioc_create_bufs = vb2_ioctl_create_bufs,
        .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
        .vidioc_querybuf = vb2_ioctl_querybuf,
        .vidioc_qbuf = vb2_ioctl_qbuf,
        .vidioc_dqbuf = vb2_ioctl_dqbuf,
        .vidioc_streamon = vb2_ioctl_streamon,
        .vidioc_streamoff = vb2_ioctl_streamoff,
        .vidioc_expbuf = vb2_ioctl_expbuf,
        .vidioc_enum_input = cio2_video_enum_input,
        .vidioc_g_input = cio2_video_g_input,
        .vidioc_s_input = cio2_video_s_input,
};

static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
                                       struct v4l2_fh *fh,
                                       struct v4l2_event_subscription *sub)
{
        if (sub->type != V4L2_EVENT_FRAME_SYNC)
                return -EINVAL;

        /* Line number. For now only zero accepted. */
        if (sub->id != 0)
                return -EINVAL;

        return v4l2_event_subscribe(fh, sub, 0, NULL);
}
static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
        struct v4l2_mbus_framefmt *format;
        const struct v4l2_mbus_framefmt fmt_default = {
                .width = 1936,
                .height = 1096,
                .code = formats[0].mbus_code,
                .field = V4L2_FIELD_NONE,
                .colorspace = V4L2_COLORSPACE_RAW,
                .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
                .quantization = V4L2_QUANTIZATION_DEFAULT,
                .xfer_func = V4L2_XFER_FUNC_DEFAULT,
        };

        /* Initialize try_fmt */
        format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
        *format = fmt_default;

        /* same as sink */
        format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
        *format = fmt_default;

        return 0;
}
/*
 * cio2_subdev_get_fmt - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
                               struct v4l2_subdev_pad_config *cfg,
                               struct v4l2_subdev_format *fmt)
{
        struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);

        mutex_lock(&q->subdev_lock);

        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
                fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
        else
                fmt->format = q->subdev_fmt;

        mutex_unlock(&q->subdev_lock);

        return 0;
}
/*
 * cio2_subdev_set_fmt - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad config
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
                               struct v4l2_subdev_pad_config *cfg,
                               struct v4l2_subdev_format *fmt)
{
        struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
        struct v4l2_mbus_framefmt *mbus;
        u32 mbus_code = fmt->format.code;
        unsigned int i;

        /*
         * Only allow setting sink pad format;
         * source always propagates from sink
         */
        if (fmt->pad == CIO2_PAD_SOURCE)
                return cio2_subdev_get_fmt(sd, cfg, fmt);

        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
                mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
        else
                mbus = &q->subdev_fmt;

        fmt->format.code = formats[0].mbus_code;

        for (i = 0; i < ARRAY_SIZE(formats); i++) {
                if (formats[i].mbus_code == mbus_code) {
                        fmt->format.code = mbus_code;
                        break;
                }
        }

        fmt->format.width = min_t(u32, fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
        fmt->format.height = min_t(u32, fmt->format.height,
                                   CIO2_IMAGE_MAX_LENGTH);
        fmt->format.field = V4L2_FIELD_NONE;

        mutex_lock(&q->subdev_lock);
        *mbus = fmt->format;
        mutex_unlock(&q->subdev_lock);

        return 0;
}
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
                                      struct v4l2_subdev_pad_config *cfg,
                                      struct v4l2_subdev_mbus_code_enum *code)
{
        if (code->index >= ARRAY_SIZE(formats))
                return -EINVAL;

        code->code = formats[code->index].mbus_code;

        return 0;
}

static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
                                                struct v4l2_subdev_format *fmt)
{
        if (is_media_entity_v4l2_subdev(pad->entity)) {
                struct v4l2_subdev *sd =
                        media_entity_to_v4l2_subdev(pad->entity);

                fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
                fmt->pad = pad->index;
                return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
        }

        return -EINVAL;
}
static int cio2_video_link_validate(struct media_link *link)
{
        struct video_device *vd = container_of(link->sink->entity,
                                               struct video_device, entity);
        struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
        struct cio2_device *cio2 = video_get_drvdata(vd);
        struct v4l2_subdev_format source_fmt;
        int ret;

        if (!media_entity_remote_pad(link->sink->entity->pads)) {
                dev_info(&cio2->pci_dev->dev,
                         "video node %s pad not connected\n", vd->name);
                return -ENOTCONN;
        }

        ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
        if (ret < 0)
                return 0;

        if (source_fmt.format.width != q->format.width ||
            source_fmt.format.height != q->format.height) {
                dev_err(&cio2->pci_dev->dev,
                        "Wrong width or height %ux%u (%ux%u expected)\n",
                        q->format.width, q->format.height,
                        source_fmt.format.width, source_fmt.format.height);
                return -EINVAL;
        }

        if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
                return -EINVAL;

        return 0;
}
static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
        .subscribe_event = cio2_subdev_subscribe_event,
        .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
        .open = cio2_subdev_open,
};

static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
        .link_validate = v4l2_subdev_link_validate_default,
        .get_fmt = cio2_subdev_get_fmt,
        .set_fmt = cio2_subdev_set_fmt,
        .enum_mbus_code = cio2_subdev_enum_mbus_code,
};

static const struct v4l2_subdev_ops cio2_subdev_ops = {
        .core = &cio2_subdev_core_ops,
        .pad = &cio2_subdev_pad_ops,
};

/******* V4L2 sub-device asynchronous registration callbacks ***********/

struct sensor_async_subdev {
        struct v4l2_async_subdev asd;
        struct csi2_bus_info csi2;
};
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
                               struct v4l2_subdev *sd,
                               struct v4l2_async_subdev *asd)
{
        struct cio2_device *cio2 = container_of(notifier,
                                        struct cio2_device, notifier);
        struct sensor_async_subdev *s_asd = container_of(asd,
                                        struct sensor_async_subdev, asd);
        struct cio2_queue *q;

        if (cio2->queue[s_asd->csi2.port].sensor)
                return -EBUSY;

        q = &cio2->queue[s_asd->csi2.port];

        q->csi2 = s_asd->csi2;
        q->sensor = sd;
        q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);

        return 0;
}

/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
                                 struct v4l2_subdev *sd,
                                 struct v4l2_async_subdev *asd)
{
        struct cio2_device *cio2 = container_of(notifier,
                                                struct cio2_device, notifier);
        struct sensor_async_subdev *s_asd = container_of(asd,
                                        struct sensor_async_subdev, asd);

        cio2->queue[s_asd->csi2.port].sensor = NULL;
}
/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
        struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
                                                notifier);
        struct sensor_async_subdev *s_asd;
        struct cio2_queue *q;
        unsigned int i, pad;
        int ret;

        for (i = 0; i < notifier->num_subdevs; i++) {
                s_asd = container_of(cio2->notifier.subdevs[i],
                                     struct sensor_async_subdev, asd);
                q = &cio2->queue[s_asd->csi2.port];

                for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
                        if (q->sensor->entity.pads[pad].flags &
                                                MEDIA_PAD_FL_SOURCE)
                                break;

                if (pad == q->sensor->entity.num_pads) {
                        dev_err(&cio2->pci_dev->dev,
                                "failed to find src pad for %s\n",
                                q->sensor->name);
                        return -ENXIO;
                }

                ret = media_create_pad_link(
                                &q->sensor->entity, pad,
                                &q->subdev.entity, CIO2_PAD_SINK,
                                0);
                if (ret) {
                        dev_err(&cio2->pci_dev->dev,
                                "failed to create link for %s\n",
                                cio2->queue[i].sensor->name);
                        return ret;
                }
        }

        return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
}

static const struct v4l2_async_notifier_operations cio2_async_ops = {
        .bound = cio2_notifier_bound,
        .unbind = cio2_notifier_unbind,
        .complete = cio2_notifier_complete,
};
static int cio2_fwnode_parse(struct device *dev,
                             struct v4l2_fwnode_endpoint *vep,
                             struct v4l2_async_subdev *asd)
{
        struct sensor_async_subdev *s_asd =
                        container_of(asd, struct sensor_async_subdev, asd);

        if (vep->bus_type != V4L2_MBUS_CSI2) {
                dev_err(dev, "Only CSI2 bus type is currently supported\n");
                return -EINVAL;
        }

        s_asd->csi2.port = vep->base.port;
        s_asd->csi2.lanes = vep->bus.mipi_csi2.num_data_lanes;

        return 0;
}

static int cio2_notifier_init(struct cio2_device *cio2)
{
        int ret;

        ret = v4l2_async_notifier_parse_fwnode_endpoints(
                &cio2->pci_dev->dev, &cio2->notifier,
                sizeof(struct sensor_async_subdev),
                cio2_fwnode_parse);
        if (ret < 0)
                return ret;

        if (!cio2->notifier.num_subdevs)
                return -ENODEV; /* no endpoint */

        cio2->notifier.ops = &cio2_async_ops;
        ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
        if (ret) {
                dev_err(&cio2->pci_dev->dev,
                        "failed to register async notifier : %d\n", ret);
                v4l2_async_notifier_cleanup(&cio2->notifier);
        }

        return ret;
}

static void cio2_notifier_exit(struct cio2_device *cio2)
{
        v4l2_async_notifier_unregister(&cio2->notifier);
        v4l2_async_notifier_cleanup(&cio2->notifier);
}
/**************** Queue initialization ****************/
static const struct media_entity_operations cio2_media_ops = {
        .link_validate = v4l2_subdev_link_validate,
};

static const struct media_entity_operations cio2_video_entity_ops = {
        .link_validate = cio2_video_link_validate,
};
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
        static const u32 default_width = 1936;
        static const u32 default_height = 1096;
        const struct ipu3_cio2_fmt dflt_fmt = formats[0];

        struct video_device *vdev = &q->vdev;
        struct vb2_queue *vbq = &q->vbq;
        struct v4l2_subdev *subdev = &q->subdev;
        struct v4l2_mbus_framefmt *fmt;
        int r;

        /* Initialize miscellaneous variables */
        mutex_init(&q->lock);
        mutex_init(&q->subdev_lock);

        /* Initialize formats to default values */
        fmt = &q->subdev_fmt;
        fmt->width = default_width;
        fmt->height = default_height;
        fmt->code = dflt_fmt.mbus_code;
        fmt->field = V4L2_FIELD_NONE;

        q->format.width = default_width;
        q->format.height = default_height;
        q->format.pixelformat = dflt_fmt.fourcc;
        q->format.colorspace = V4L2_COLORSPACE_RAW;
        q->format.field = V4L2_FIELD_NONE;
        q->format.num_planes = 1;
        q->format.plane_fmt[0].bytesperline =
                                cio2_bytesperline(q->format.width);
        q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
                                                q->format.height;

        /* Initialize fbpt */
        r = cio2_fbpt_init(cio2, q);
        if (r)
                goto fail_fbpt;

        /* Initialize media entities */
        q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
                MEDIA_PAD_FL_MUST_CONNECT;
        q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
        subdev->entity.ops = &cio2_media_ops;
        subdev->internal_ops = &cio2_subdev_internal_ops;
        r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
        if (r) {
                dev_err(&cio2->pci_dev->dev,
                        "failed initialize subdev media entity (%d)\n", r);
                goto fail_subdev_media_entity;
        }

        q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
        vdev->entity.ops = &cio2_video_entity_ops;
        r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
        if (r) {
                dev_err(&cio2->pci_dev->dev,
                        "failed initialize videodev media entity (%d)\n", r);
                goto fail_vdev_media_entity;
        }

        /* Initialize subdev */
        v4l2_subdev_init(subdev, &cio2_subdev_ops);
        subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
        subdev->owner = THIS_MODULE;
        snprintf(subdev->name, sizeof(subdev->name),
                 CIO2_ENTITY_NAME " %td", q - cio2->queue);
        v4l2_set_subdevdata(subdev, cio2);
        r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
        if (r) {
                dev_err(&cio2->pci_dev->dev,
                        "failed initialize subdev (%d)\n", r);
                goto fail_subdev;
        }

        /* Initialize vbq */
        vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
        vbq->ops = &cio2_vb2_ops;
        vbq->mem_ops = &vb2_dma_sg_memops;
        vbq->buf_struct_size = sizeof(struct cio2_buffer);
        vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        vbq->min_buffers_needed = 1;
        vbq->drv_priv = cio2;
        vbq->lock = &q->lock;
        r = vb2_queue_init(vbq);
        if (r) {
                dev_err(&cio2->pci_dev->dev,
                        "failed to initialize videobuf2 queue (%d)\n", r);
                goto fail_vbq;
        }

        /* Initialize vdev */
        snprintf(vdev->name, sizeof(vdev->name),
                 "%s %td", CIO2_NAME, q - cio2->queue);
        vdev->release = video_device_release_empty;
        vdev->fops = &cio2_v4l2_fops;
        vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
        vdev->lock = &cio2->lock;
        vdev->v4l2_dev = &cio2->v4l2_dev;
        vdev->queue = &q->vbq;
        vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
        video_set_drvdata(vdev, cio2);
        r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
        if (r) {
                dev_err(&cio2->pci_dev->dev,
                        "failed to register video device (%d)\n", r);
                goto fail_vdev;
        }

        /* Create link from CIO2 subdev to output node */
        r = media_create_pad_link(
                &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
                MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
        if (r)
                goto fail_link;

        return 0;

fail_link:
        video_unregister_device(&q->vdev);
fail_vdev:
        vb2_queue_release(vbq);
fail_vbq:
        v4l2_device_unregister_subdev(subdev);
fail_subdev:
        media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
        media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
        cio2_fbpt_exit(q, &cio2->pci_dev->dev);
fail_fbpt:
        mutex_destroy(&q->subdev_lock);
        mutex_destroy(&q->lock);

        return r;
}
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
        video_unregister_device(&q->vdev);
        media_entity_cleanup(&q->vdev.entity);
        vb2_queue_release(&q->vbq);
        v4l2_device_unregister_subdev(&q->subdev);
        media_entity_cleanup(&q->subdev.entity);
        cio2_fbpt_exit(q, &cio2->pci_dev->dev);
        mutex_destroy(&q->subdev_lock);
        mutex_destroy(&q->lock);
}

static int cio2_queues_init(struct cio2_device *cio2)
{
        int i, r;

        for (i = 0; i < CIO2_QUEUES; i++) {
                r = cio2_queue_init(cio2, &cio2->queue[i]);
                if (r)
                        break;
        }

        if (i == CIO2_QUEUES)
                return 0;

        for (i--; i >= 0; i--)
                cio2_queue_exit(cio2, &cio2->queue[i]);

        return r;
}

static void cio2_queues_exit(struct cio2_device *cio2)
{
        unsigned int i;

        for (i = 0; i < CIO2_QUEUES; i++)
                cio2_queue_exit(cio2, &cio2->queue[i]);
}
/**************** PCI interface ****************/

static int cio2_pci_config_setup(struct pci_dev *dev)
{
        u16 pci_command;
        int r = pci_enable_msi(dev);

        if (r) {
                dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
                return r;
        }

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                PCI_COMMAND_INTX_DISABLE;
        pci_write_config_word(dev, PCI_COMMAND, pci_command);

        return 0;
}
static int cio2_pci_probe(struct pci_dev *pci_dev,
                          const struct pci_device_id *id)
{
        struct cio2_device *cio2;
        void __iomem *const *iomap;
        int r;

        cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
        if (!cio2)
                return -ENOMEM;
        cio2->pci_dev = pci_dev;

        r = pcim_enable_device(pci_dev);
        if (r) {
                dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
                return r;
        }

        dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
                 pci_dev->device, pci_dev->revision);

        r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
        if (r) {
                dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
                return -ENODEV;
        }

        iomap = pcim_iomap_table(pci_dev);
        if (!iomap) {
                dev_err(&pci_dev->dev, "failed to iomap table\n");
                return -ENODEV;
        }

        cio2->base = iomap[CIO2_PCI_BAR];

        pci_set_drvdata(pci_dev, cio2);

        pci_set_master(pci_dev);

        r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
        if (r) {
                dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
                return -ENODEV;
        }

        r = cio2_pci_config_setup(pci_dev);
        if (r)
                return -ENODEV;

        r = cio2_fbpt_init_dummy(cio2);
        if (r)
                return r;

        mutex_init(&cio2->lock);

        cio2->media_dev.dev = &cio2->pci_dev->dev;
        strlcpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
                sizeof(cio2->media_dev.model));
        snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
                 "PCI:%s", pci_name(cio2->pci_dev));
        cio2->media_dev.hw_revision = 0;

        media_device_init(&cio2->media_dev);
        r = media_device_register(&cio2->media_dev);
        if (r < 0)
                goto fail_mutex_destroy;

        cio2->v4l2_dev.mdev = &cio2->media_dev;
        r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to register V4L2 device (%d)\n", r);
                goto fail_media_device_unregister;
        }

        r = cio2_queues_init(cio2);
        if (r)
                goto fail_v4l2_device_unregister;

        /* Register notifier for subdevices we care about */
        r = cio2_notifier_init(cio2);
        if (r)
                goto fail_cio2_queue_exit;

        r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
                             IRQF_SHARED, CIO2_NAME, cio2);
        if (r) {
                dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
                goto fail;
        }

        pm_runtime_put_noidle(&pci_dev->dev);
        pm_runtime_allow(&pci_dev->dev);

        return 0;

fail:
        cio2_notifier_exit(cio2);
fail_cio2_queue_exit:
        cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
        v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
        media_device_unregister(&cio2->media_dev);
        media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
        mutex_destroy(&cio2->lock);
        cio2_fbpt_exit_dummy(cio2);

        return r;
}
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
        struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
        unsigned int i;

        media_device_unregister(&cio2->media_dev);
        cio2_notifier_exit(cio2);
        for (i = 0; i < CIO2_QUEUES; i++)
                cio2_queue_exit(cio2, &cio2->queue[i]);
        cio2_fbpt_exit_dummy(cio2);
        v4l2_device_unregister(&cio2->v4l2_dev);
        media_device_cleanup(&cio2->media_dev);
        mutex_destroy(&cio2->lock);

        pm_runtime_forbid(&pci_dev->dev);
        pm_runtime_get_noresume(&pci_dev->dev);
}
static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
        void __iomem *const base = cio2->base;
        u16 pm;

        writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
        dev_dbg(dev, "cio2 runtime suspend.\n");

        pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
        pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
        pm |= CIO2_PMCSR_D3;
        pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

        return 0;
}

static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
        void __iomem *const base = cio2->base;
        u16 pm;

        writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
        dev_dbg(dev, "cio2 runtime resume.\n");

        pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
        pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
        pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

        return 0;
}
/*
 * Helper function to advance all the elements of a circular buffer by "start"
 * positions
 */
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{
        struct {
                size_t begin, end;
        } arr[2] = {
                { 0, start - 1 },
                { start, elems - 1 },
        };

#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)

        /* Loop as long as we have out-of-place entries */
        while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
                size_t size0, i;

                /*
                 * Find the number of entries that can be arranged on this
                 * iteration.
                 */
                size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));

                /* Swap the entries in two parts of the array. */
                for (i = 0; i < size0; i++) {
                        u8 *d = ptr + elem_size * (arr[1].begin + i);
                        u8 *s = ptr + elem_size * (arr[0].begin + i);
                        size_t j;

                        for (j = 0; j < elem_size; j++)
                                swap(d[j], s[j]);
                }

                if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
                        /* The end of the first array remains unarranged. */
                        arr[0].begin += size0;
                } else {
                        /*
                         * The first array is fully arranged so we proceed
                         * handling the next one.
                         */
                        arr[0].begin = arr[1].begin;
                        arr[0].end = arr[1].begin + size0 - 1;
                        arr[1].begin += size0;
                }
        }
}
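
/*
 * Worked example (added note, not in the original): arrange(buf, 1, 5, 2)
 * on the bytes {A, B, C, D, E} rotates the array left by two positions,
 * yielding {C, D, E, A, B}; i.e. the element at index "start" becomes
 * the new head, as cio2_fbpt_rearrange() below relies on.
 */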
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
        unsigned int i, j;

        for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
                i++, j = (j + 1) % CIO2_MAX_BUFFERS)
                if (q->bufs[j])
                        break;

        if (i == CIO2_MAX_BUFFERS)
                return;

        if (j) {
                arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
                        CIO2_MAX_BUFFERS, j);
                arrange(q->bufs, sizeof(struct cio2_buffer *),
                        CIO2_MAX_BUFFERS, j);
        }

        /*
         * DMA clears the valid bit when accessing the buffer.
         * When stopping stream in suspend callback, some of the buffers
         * may be in invalid state. After resume, when DMA meets the invalid
         * buffer, it will halt and stop receiving new data.
         * To avoid DMA halting, set the valid bit for all buffers in FBPT.
         */
        for (i = 0; i < CIO2_MAX_BUFFERS; i++)
                cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}
static int __maybe_unused cio2_suspend(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
        struct cio2_queue *q = cio2->cur_queue;

        dev_dbg(dev, "cio2 suspend\n");
        if (!cio2->streaming)
                return 0;

        /* Stop stream */
        cio2_hw_exit(cio2, q);
        synchronize_irq(pci_dev->irq);

        pm_runtime_force_suspend(dev);

        /*
         * Upon resume, hw starts to process the fbpt entries from beginning,
         * so relocate the queued buffers to the fbpt head before suspend.
         */
        cio2_fbpt_rearrange(cio2, q);
        q->bufs_first = 0;
        q->bufs_next = 0;

        return 0;
}
static int __maybe_unused cio2_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
        int r = 0;
        struct cio2_queue *q = cio2->cur_queue;

        dev_dbg(dev, "cio2 resume\n");
        if (!cio2->streaming)
                return 0;

        r = pm_runtime_force_resume(&cio2->pci_dev->dev);
        if (r < 0) {
                dev_err(&cio2->pci_dev->dev,
                        "failed to set power %d\n", r);
                return r;
        }

        r = cio2_hw_init(cio2, q);
        if (r)
                dev_err(dev, "failed to init cio2 hw\n");

        return r;
}
static const struct dev_pm_ops cio2_pm_ops = {
        SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};

static const struct pci_device_id cio2_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
        { }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver = {
        .name = CIO2_NAME,
        .id_table = cio2_pci_id_table,
        .probe = cio2_pci_probe,
        .remove = cio2_pci_remove,
        .driver = {
                .pm = &cio2_pm_ops,
        },
};

module_pci_driver(cio2_pci_driver);
MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");