1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2017 Intel Corporation
5 * Based partially on Intel IPU4 driver written by
6 * Sakari Ailus <sakari.ailus@linux.intel.com>
7 * Samu Onkalo <samu.onkalo@intel.com>
8 * Jouni Högander <jouni.hogander@intel.com>
9 * Jouni Ukkonen <jouni.ukkonen@intel.com>
10 * Antti Laakso <antti.laakso@intel.com>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/pfn.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/property.h>
23 #include <linux/vmalloc.h>
24 #include <media/v4l2-ctrls.h>
25 #include <media/v4l2-device.h>
26 #include <media/v4l2-event.h>
27 #include <media/v4l2-fwnode.h>
28 #include <media/v4l2-ioctl.h>
29 #include <media/videobuf2-dma-sg.h>
31 #include "ipu3-cio2.h"
33 struct ipu3_cio2_fmt {
40 * These are raw formats used in Intel's third generation of
41 * Image Processing Unit known as IPU3.
42 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
43 * last LSB 6 bits unused.
/*
 * Supported formats: IPU3 packed 10-bit raw Bayer, one entry per CFA
 * ordering (GRBG/GBRG/BGGR/RGGB), each pairing a media-bus code with
 * the matching V4L2 fourcc. The first entry is the driver default.
 * NOTE(review): this listing is a garbled extraction -- the per-entry
 * closing braces, the remaining fields (e.g. bit depth / MIPI data
 * type code, see fmt->mipicode use in cio2_hw_init) and the
 * terminating "};" were dropped. Restore from upstream before building.
 */
45 static const struct ipu3_cio2_fmt formats[] = {
46 { /* put default entry at beginning */
47 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
48 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
51 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
52 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
55 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
56 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
59 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
60 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
66 * cio2_find_format - lookup color format by fourcc or/and media bus code
67 * @pixelformat: fourcc to match, ignored if null
68 * @mbus_code: media bus code to match, ignored if null
70 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
75 for (i = 0; i < ARRAY_SIZE(formats); i++) {
76 if (pixelformat && *pixelformat != formats[i].fourcc)
78 if (mbus_code && *mbus_code != formats[i].mbus_code)
87 static inline u32 cio2_bytesperline(const unsigned int width)
90 * 64 bytes for every 50 pixels, the line length
91 * in bytes is multiple of 64 (line end alignment).
93 return DIV_ROUND_UP(width, 50) * 64;
96 /**************** FBPT operations ****************/
98 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
100 if (cio2->dummy_lop) {
101 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
102 cio2->dummy_lop, cio2->dummy_lop_bus_addr);
103 cio2->dummy_lop = NULL;
105 if (cio2->dummy_page) {
106 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
107 cio2->dummy_page, cio2->dummy_page_bus_addr);
108 cio2->dummy_page = NULL;
112 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
116 cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
117 &cio2->dummy_page_bus_addr,
119 cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
120 &cio2->dummy_lop_bus_addr,
122 if (!cio2->dummy_page || !cio2->dummy_lop) {
123 cio2_fbpt_exit_dummy(cio2);
127 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
128 * Initialize each entry to dummy_page bus base address.
130 for (i = 0; i < CIO2_LOP_ENTRIES; i++)
131 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
136 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
137 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
140 * The CPU first initializes some fields in fbpt, then sets
141 * the VALID bit, this barrier is to ensure that the DMA(device)
142 * does not see the VALID bit enabled before other fields are
143 * initialized; otherwise it could lead to havoc.
148 * Request interrupts for start and completion
149 * Valid bit is applicable only to 1st entry
151 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
152 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
155 /* Initialize fpbt entries to point to dummy frame */
156 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
157 struct cio2_fbpt_entry
158 entry[CIO2_MAX_LOPS])
162 entry[0].first_entry.first_page_offset = 0;
163 entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
164 entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
166 for (i = 0; i < CIO2_MAX_LOPS; i++)
167 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
169 cio2_fbpt_entry_enable(cio2, entry);
/*
 * Fill the FBPT entry group for a real vb2 buffer: record the first-page
 * offset, total page count and last-page DMA limit, then point the LOP
 * slots at the buffer's per-LOP bus addresses, terminating with the
 * dummy LOP, and finally enable the entry for the DMA engine.
 * NOTE(review): garbled extraction -- the local declarations for
 * 'remaining'/'i', the loop increments ('entry++; i++;') and the
 * closing braces were dropped; restore from upstream before building.
 */
172 /* Initialize fpbt entries to point to a given buffer */
173 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
174 struct cio2_buffer *b,
175 struct cio2_fbpt_entry
176 entry[CIO2_MAX_LOPS])
178 struct vb2_buffer *vb = &b->vbb.vb2_buf;
179 unsigned int length = vb->planes[0].length;
182 entry[0].first_entry.first_page_offset = b->offset;
183 remaining = length + entry[0].first_entry.first_page_offset;
184 entry[1].second_entry.num_of_pages = PFN_UP(remaining);
186 * last_page_available_bytes has the offset of the last byte in the
187 * last page which is still accessible by DMA. DMA cannot access
188 * beyond this point. Valid range for this is from 0 to 4095.
189 * 0 indicates 1st byte in the page is DMA accessible.
190 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
191 * is available for DMA transfer.
193 entry[1].second_entry.last_page_available_bytes =
194 (remaining & ~PAGE_MASK) ?
195 (remaining & ~PAGE_MASK) - 1 : PAGE_SIZE - 1;
/* Walk the buffer's LOPs; each LOP covers CIO2_LOP_ENTRIES pages. */
199 while (remaining > 0) {
200 entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
201 remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
207 * The first not meaningful FBPT entry should point to a valid LOP
209 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
211 cio2_fbpt_entry_enable(cio2, entry);
214 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
216 struct device *dev = &cio2->pci_dev->dev;
218 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
226 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
228 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
231 /**************** CSI2 hardware setup ****************/
234 * The CSI2 receiver has several parameters affecting
235 * the receiver timings. These depend on the MIPI bus frequency
236 * F in Hz (sensor transmitter rate) as follows:
237 * register value = (A/1e9 + B * UI) / COUNT_ACC
239 * UI = 1 / (2 * F) in seconds
240 * COUNT_ACC = counter accuracy in seconds
241 * For IPU3 COUNT_ACC = 0.0625
243 * A and B are coefficients from the table below,
244 * depending whether the register minimum or maximum value is
248 * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
249 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
251 * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
252 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
253 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
254 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
255 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
256 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
257 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
258 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
260 * We use the minimum values of both A and B.
264 * shift for keeping value range suitable for 32-bit integer arithmetic
266 #define LIMIT_SHIFT 8
268 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
270 const u32 accinv = 16; /* invert of counter resolution */
271 const u32 uiinv = 500000000; /* 1e9 / 2 */
274 freq >>= LIMIT_SHIFT;
276 if (WARN_ON(freq <= 0 || freq > S32_MAX))
279 * b could be 0, -2 or -8, so |accinv * b| is always
280 * less than (1 << ds) and thus |r| < 500000000.
282 r = accinv * b * (uiinv >> LIMIT_SHIFT);
284 /* max value of a is 95 */
290 /* Calculate the the delay value for termination enable of clock lane HS Rx */
291 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
292 struct cio2_csi2_timing *timing)
294 struct device *dev = &cio2->pci_dev->dev;
295 struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
296 struct v4l2_ctrl *link_freq;
303 link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
305 dev_err(dev, "failed to find LINK_FREQ\n");
309 qm.index = v4l2_ctrl_g_ctrl(link_freq);
310 r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
312 dev_err(dev, "failed to get menu item\n");
317 dev_err(dev, "error invalid link_freq\n");
322 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
323 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
325 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
326 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
327 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
329 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
330 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
331 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
333 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
334 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
335 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
337 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
339 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
340 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
341 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
342 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
/*
 * cio2_hw_init - program the CIO2 for streaming on queue @q.
 * Sequence: CSI-2 RX lane timing registers, PBM watermarks and
 * arbitration, MIPI backend LUTs, interrupt enables, pixel format
 * config, clock gating and LTR values, then the frame DMA channel
 * (FBPT base/length/IRQ policy), and finally enables the MIPI backend
 * and the CSI-2 receiver ("last device in the pipe" first).
 * NOTE(review): garbled extraction -- dropped lines include local
 * declarations ('int r; unsigned int i;'), the '!fmt' and timing-error
 * checks, several CIO2_CGC_* and CIO2_CDMAC0_* flag lines, loop
 * closers and the trailing 'return 0;'. Restore from upstream.
 */
347 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
349 static const int NUM_VCS = 4;
350 static const int SID;	/* Stream id */
351 static const int ENTRY;
352 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
353 CIO2_FBPT_SUBENTRY_UNIT);
354 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
355 const struct ipu3_cio2_fmt *fmt;
356 void __iomem *const base = cio2->base;
357 u8 lanes, csi2bus = q->csi2.port;
358 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
359 struct cio2_csi2_timing timing;
362 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
366 lanes = q->csi2.lanes;
368 r = cio2_csi2_calc_timing(cio2, q, &timing);
/* Program clock-lane and per-data-lane termen/settle delays. */
372 writel(timing.clk_termen, q->csi_rx_base +
373 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
374 writel(timing.clk_settle, q->csi_rx_base +
375 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
377 for (i = 0; i < lanes; i++) {
378 writel(timing.dat_termen, q->csi_rx_base +
379 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
380 writel(timing.dat_settle, q->csi_rx_base +
381 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
/* Pixel buffer manager watermarks and arbitration. */
384 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
385 CIO2_PBM_WMCTRL1_MID1_2CK |
386 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
387 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
388 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
389 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
390 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
391 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
392 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
393 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
394 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
395 CIO2_PBM_ARB_CTRL_LE_EN |
396 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
397 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
398 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
399 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
400 base + CIO2_REG_PBM_ARB_CTRL);
401 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
402 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
403 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
404 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
406 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
407 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
409 /* Configure MIPI backend */
410 for (i = 0; i < NUM_VCS; i++)
411 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
413 /* There are 16 short packet LUT entry */
414 for (i = 0; i < 16; i++)
415 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
416 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
417 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
418 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
/* Interrupt enables for the input/output error and IRQ controllers. */
420 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
421 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
422 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
423 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
424 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
425 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
427 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
428 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
429 base + CIO2_REG_INT_EN);
431 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
432 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
433 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
434 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
435 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
436 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
437 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
438 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
439 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
440 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
442 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
/* Clock gating; NOTE(review): several CGC flag lines dropped here. */
443 writel(CIO2_CGC_PRIM_TGE |
447 CIO2_CGC_CSI2_INTERFRAME_TGE |
448 CIO2_CGC_CSI2_PORT_DCGE |
453 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
454 CIO2_CGC_CSI_CLKGATE_HOLDOFF
455 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
456 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
457 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
458 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
459 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
460 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
461 base + CIO2_REG_LTRVAL01);
462 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
463 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
464 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
465 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
466 base + CIO2_REG_LTRVAL23);
/* Reset every DMA channel, then program the one we use. */
468 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
469 writel(0, base + CIO2_REG_CDMABA(i));
470 writel(0, base + CIO2_REG_CDMAC0(i));
471 writel(0, base + CIO2_REG_CDMAC1(i));
475 writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
477 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
478 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
479 CIO2_CDMAC0_DMA_INTR_ON_FE |
480 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
482 CIO2_CDMAC0_DMA_INTR_ON_FS |
483 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
485 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
486 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
488 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
490 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
491 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
492 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
493 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
494 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
496 /* Clear interrupts */
497 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
498 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
499 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
500 writel(~0, base + CIO2_REG_INT_STS);
502 /* Enable devices, starting from the last device in the pipe */
503 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
504 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
/*
 * cio2_hw_exit - quiesce the hardware for queue @q: disable the CSI-2
 * receiver and MIPI backend, halt the frame DMA channel (polling for
 * CIO2_CDMAC0_DMA_HALTED), then abort all ports' frame reception.
 * NOTE(review): garbled extraction -- local declarations ('value',
 * 'ret', 'i'), the readl_poll_timeout delay/timeout arguments, the
 * 'if (ret)' guard before dev_err and the closing braces were dropped.
 */
509 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
511 void __iomem *const base = cio2->base;
516 /* Disable CSI receiver and MIPI backend devices */
517 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
518 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
519 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
520 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
/* Halt the DMA channel and wait for the engine to acknowledge. */
523 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
524 ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
525 value, value & CIO2_CDMAC0_DMA_HALTED,
528 dev_err(&cio2->pci_dev->dev,
529 "DMA %i can not be halted\n", CIO2_DMA_CHAN);
531 for (i = 0; i < CIO2_NUM_PORTS; i++) {
532 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
533 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
534 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
535 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
/*
 * cio2_buffer_done - complete all buffers the DMA engine has finished.
 * Walks the FBPT from q->bufs_first, handing each completed vb2 buffer
 * back with timestamp/sequence filled in, re-initializing the consumed
 * FBPT slot to the dummy frame, until it reaches an entry whose VALID
 * bit is still set (i.e. not yet consumed by the hardware).
 * NOTE(review): garbled extraction -- the 'do {' opener of the
 * do/while loop, the 'if (b)' guard around the completion body and
 * several closing braces were dropped; restore from upstream.
 */
539 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
541 struct device *dev = &cio2->pci_dev->dev;
542 struct cio2_queue *q = cio2->cur_queue;
543 struct cio2_fbpt_entry *entry;
544 u64 ns = ktime_get_ns();
546 if (dma_chan >= CIO2_QUEUES) {
547 dev_err(dev, "bad DMA channel %i\n", dma_chan);
/* VALID still set on the first entry means nothing completed yet. */
551 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
552 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
553 dev_warn(&cio2->pci_dev->dev,
554 "no ready buffers found on DMA channel %u\n",
559 /* Find out which buffer(s) are ready */
561 struct cio2_buffer *b;
563 b = q->bufs[q->bufs_first];
565 unsigned int bytes = entry[1].second_entry.num_of_bytes;
567 q->bufs[q->bufs_first] = NULL;
568 atomic_dec(&q->bufs_queued);
569 dev_dbg(&cio2->pci_dev->dev,
570 "buffer %i done\n", b->vbb.vb2_buf.index);
572 b->vbb.vb2_buf.timestamp = ns;
573 b->vbb.field = V4L2_FIELD_NONE;
574 b->vbb.sequence = atomic_read(&q->frame_sequence);
575 if (b->vbb.vb2_buf.planes[0].length != bytes)
576 dev_warn(dev, "buffer length is %d received %d\n",
577 b->vbb.vb2_buf.planes[0].length,
579 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
581 atomic_inc(&q->frame_sequence);
582 cio2_fbpt_entry_init_dummy(cio2, entry);
583 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
584 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
585 } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
588 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
591 * For the user space camera control algorithms it is essential
592 * to know when the reception of a frame has begun. That's often
593 * the best timing information to get from the hardware.
595 struct v4l2_event event = {
596 .type = V4L2_EVENT_FRAME_SYNC,
597 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
600 v4l2_event_queue(q->subdev.devnode, &event);
/*
 * CSI-2 receiver IRQCTRL status bit descriptions, indexed by bit
 * position (see the ffs() walk in cio2_irq_handle_once()).
 * NOTE(review): garbled extraction -- some entries (orig. lines 607,
 * 611-612, 615, 622) and the terminating "};" were dropped; the index
 * <-> bit mapping is broken until they are restored from upstream.
 */
603 static const char *const cio2_irq_errs[] = {
604 "single packet header error corrected",
605 "multiple packet header errors detected",
606 "payload checksum (CRC) error",
608 "reserved short packet data type detected",
609 "reserved long packet data type detected",
610 "incomplete long packet detected",
613 "DPHY start of transmission error",
614 "DPHY synchronization error",
616 "escape mode trigger event",
617 "escape mode ultra-low power state for data lane(s)",
618 "escape mode ultra-low power state exit for clock lane",
619 "inter-frame short packet discarded",
620 "inter-frame long packet discarded",
621 "non-matching Long Packet stalled",
/*
 * Per-port error bit descriptions for the INT_STS_EXT_IE register
 * (8 bits per port), indexed by bit position.
 * NOTE(review): garbled extraction -- entries at orig. lines 625 and
 * 628-632 plus the terminating "};" were dropped; the index <-> bit
 * mapping is broken until they are restored from upstream.
 */
624 static const char *const cio2_port_errs[] = {
626 "DPHY not recoverable",
627 "ECC not recoverable",
/*
 * cio2_irq_handle_once - service one snapshot of the INT_STS register.
 * Handles, in order: output errors (IOOE), DMA frame-complete (IOC),
 * DMA frame-start/line (IOS/IOLN, emitting a SOF event for our
 * channel), and CSI-2 receiver/port errors (IOIE/IOIRQ), decoding the
 * per-port and per-receiver error bits via the string tables above.
 * Any bits left over are reported as unknown.
 * NOTE(review): garbled extraction -- dropped lines include local
 * declarations ('u32 clr = 0; unsigned int d;', 'unsigned int port;
 * u32 err_mask; int i;'), several else/if lines, the writel() that
 * acknowledges 'clr', and closing braces. Restore from upstream.
 */
634 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
636 void __iomem *const base = cio2->base;
637 struct device *dev = &cio2->pci_dev->dev;
639 if (int_status & CIO2_INT_IOOE) {
641 * Interrupt on Output Error:
642 * 1) SRAM is full and FS received, or
643 * 2) An invalid bit detected by DMA.
645 u32 oe_status, oe_clear;
647 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
648 oe_status = oe_clear;
650 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
651 dev_err(dev, "DMA output error: 0x%x\n",
652 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
653 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
654 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
656 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
657 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
658 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
659 >> CIO2_INT_EXT_OE_OES_SHIFT);
660 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
662 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
664 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
666 int_status &= ~CIO2_INT_IOOE;
669 if (int_status & CIO2_INT_IOC_MASK) {
670 /* DMA IO done -- frame ready */
674 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
675 if (int_status & CIO2_INT_IOC(d)) {
676 clr |= CIO2_INT_IOC(d);
677 cio2_buffer_done(cio2, d);
682 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
683 /* DMA IO starts or reached specified line */
687 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
688 if (int_status & CIO2_INT_IOS_IOLN(d)) {
689 clr |= CIO2_INT_IOS_IOLN(d);
690 if (d == CIO2_DMA_CHAN)
691 cio2_queue_event_sof(cio2,
697 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
698 /* CSI2 receiver (error) interrupt */
699 u32 ie_status, ie_clear;
702 ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
703 ie_status = ie_clear;
705 for (port = 0; port < CIO2_NUM_PORTS; port++) {
706 u32 port_status = (ie_status >> (port * 8)) & 0xff;
707 u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
708 void __iomem *const csi_rx_base =
709 base + CIO2_REG_PIPE_BASE(port);
712 while (port_status & err_mask) {
713 i = ffs(port_status) - 1;
714 dev_err(dev, "port %i error %s\n",
715 port, cio2_port_errs[i]);
716 ie_status &= ~BIT(port * 8 + i);
717 port_status &= ~BIT(i);
720 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
721 u32 csi2_status, csi2_clear;
723 csi2_status = readl(csi_rx_base +
724 CIO2_REG_IRQCTRL_STATUS);
725 csi2_clear = csi2_status;
727 BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
729 while (csi2_status & err_mask) {
730 i = ffs(csi2_status) - 1;
732 "CSI-2 receiver port %i: %s\n",
733 port, cio2_irq_errs[i]);
734 csi2_status &= ~BIT(i);
738 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
741 "unknown CSI2 error 0x%x on port %i\n",
744 ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
748 writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
750 dev_warn(dev, "unknown interrupt 0x%x on IE\n",
753 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
757 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
760 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
762 struct cio2_device *cio2 = cio2_ptr;
763 void __iomem *const base = cio2->base;
764 struct device *dev = &cio2->pci_dev->dev;
767 int_status = readl(base + CIO2_REG_INT_STS);
768 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
773 writel(int_status, base + CIO2_REG_INT_STS);
774 cio2_irq_handle_once(cio2, int_status);
775 int_status = readl(base + CIO2_REG_INT_STS);
777 dev_dbg(dev, "pending status 0x%x\n", int_status);
778 } while (int_status);
783 /**************** Videobuf2 interface ****************/
785 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
786 enum vb2_buffer_state state)
790 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
792 atomic_dec(&q->bufs_queued);
793 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
799 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
800 unsigned int *num_buffers,
801 unsigned int *num_planes,
802 unsigned int sizes[],
803 struct device *alloc_devs[])
805 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
806 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
809 *num_planes = q->format.num_planes;
811 for (i = 0; i < *num_planes; ++i) {
812 sizes[i] = q->format.plane_fmt[i].sizeimage;
813 alloc_devs[i] = &cio2->pci_dev->dev;
816 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
818 /* Initialize buffer queue */
819 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
821 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
823 atomic_set(&q->bufs_queued, 0);
/*
 * vb2 buf_init: allocate the buffer's List-Of-Pointers (LOP) pages and
 * populate them with the PFNs of the buffer's scatter-gather DMA pages,
 * padding the remainder with the dummy page. Fails with -ENOSPC on an
 * impossible buffer size and -ENOMEM (via the fail path) on allocation
 * failure.
 * NOTE(review): garbled extraction -- dropped lines include the
 * 'struct sg_table *sg;' and 'int i, j;' declarations, the
 * 'goto fail'/'b->offset = 0' branches, the i/j loop bookkeeping and
 * the fail: unwind loop header. Restore from upstream before use.
 */
830 /* Called after each buffer is allocated */
831 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
833 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
834 struct device *dev = &cio2->pci_dev->dev;
835 struct cio2_buffer *b =
836 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
837 unsigned int pages = PFN_UP(vb->planes[0].length);
/* +1 so the trailing dummy-page terminator always fits. */
838 unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
840 struct sg_dma_page_iter sg_iter;
843 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
844 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
845 vb->planes[0].length);
846 return -ENOSPC; /* Should never happen */
849 memset(b->lop, 0, sizeof(b->lop));
850 /* Allocate LOP table */
851 for (i = 0; i < lops; i++) {
852 b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
853 &b->lop_bus_addr[i], GFP_KERNEL);
859 sg = vb2_dma_sg_plane_desc(vb, 0);
863 if (sg->nents && sg->sgl)
864 b->offset = sg->sgl->offset;
867 for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) {
870 b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
872 if (j == CIO2_LOP_ENTRIES) {
/* Terminate the LOP with the dummy page. */
878 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
882 dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
/*
 * vb2 buf_queue: hand a buffer to the CIO2 DMA engine by claiming the
 * next free FBPT slot (skipping past the engine's current read
 * pointer) and initializing it via cio2_fbpt_entry_init_buf(). Runs
 * with local interrupts disabled to minimize the window in which the
 * engine could drain the ring and halt. If no slot is free the buffer
 * is returned to vb2 in the ERROR state.
 * NOTE(review): garbled extraction -- dropped lines include the
 * 'unsigned long flags;' and 'unsigned int fbpt_rp;' declarations,
 * the 'q->bufs[next] = b;' store inside the success branch, a
 * 'return;' and several closing braces. Restore from upstream.
 */
886 /* Transfer buffer ownership to cio2 */
887 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
889 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
890 struct cio2_queue *q =
891 container_of(vb->vb2_queue, struct cio2_queue, vbq);
892 struct cio2_buffer *b =
893 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
894 struct cio2_fbpt_entry *entry;
896 unsigned int i, j, next = q->bufs_next;
897 int bufs_queued = atomic_inc_return(&q->bufs_queued);
900 dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
903 * This code queues the buffer to the CIO2 DMA engine, which starts
904 * running once streaming has started. It is possible that this code
905 * gets pre-empted due to increased CPU load. Upon this, the driver
906 * does not get an opportunity to queue new buffers to the CIO2 DMA
907 * engine. When the DMA engine encounters an FBPT entry without the
908 * VALID bit set, the DMA engine halts, which requires a restart of
909 * the DMA engine and sensor, to continue streaming.
910 * This is not desired and is highly unlikely given that there are
911 * 32 FBPT entries that the DMA engine needs to process, to run into
912 * an FBPT entry, without the VALID bit set. We try to mitigate this
913 * by disabling interrupts for the duration of this queueing.
915 local_irq_save(flags);
917 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
918 >> CIO2_CDMARI_FBPT_RP_SHIFT)
919 & CIO2_CDMARI_FBPT_RP_MASK;
922 * fbpt_rp is the fbpt entry that the dma is currently working
923 * on, but since it could jump to next entry at any time,
924 * assume that we might already be there.
926 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
928 if (bufs_queued <= 1 || fbpt_rp == next)
929 /* Buffers were drained */
930 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
932 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
934 * We have allocated CIO2_MAX_BUFFERS circularly for the
935 * hw, the user has requested N buffer queue. The driver
936 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
937 * user queues a buffer, there necessarily is a free buffer.
939 if (!q->bufs[next]) {
941 entry = &q->fbpt[next * CIO2_MAX_LOPS];
942 cio2_fbpt_entry_init_buf(cio2, b, entry);
943 local_irq_restore(flags);
944 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
945 for (j = 0; j < vb->num_planes; j++)
946 vb2_set_plane_payload(vb, j,
947 q->format.plane_fmt[j].sizeimage);
951 dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
952 next = (next + 1) % CIO2_MAX_BUFFERS;
955 local_irq_restore(flags);
956 dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
957 atomic_dec(&q->bufs_queued);
958 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
961 /* Called when each buffer is freed */
962 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
964 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
965 struct cio2_buffer *b =
966 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
970 for (i = 0; i < CIO2_MAX_LOPS; i++) {
972 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
973 b->lop[i], b->lop_bus_addr[i]);
977 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
979 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
980 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
984 atomic_set(&q->frame_sequence, 0);
986 r = pm_runtime_get_sync(&cio2->pci_dev->dev);
988 dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
989 pm_runtime_put_noidle(&cio2->pci_dev->dev);
993 r = media_pipeline_start(&q->vdev.entity, &q->pipe);
997 r = cio2_hw_init(cio2, q);
1001 /* Start streaming on sensor */
1002 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1004 goto fail_csi2_subdev;
1006 cio2->streaming = true;
1011 cio2_hw_exit(cio2, q);
1013 media_pipeline_stop(&q->vdev.entity);
1015 dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
1016 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1017 pm_runtime_put(&cio2->pci_dev->dev);
1022 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1024 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1025 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1027 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1028 dev_err(&cio2->pci_dev->dev,
1029 "failed to stop sensor streaming\n");
1031 cio2_hw_exit(cio2, q);
1032 synchronize_irq(cio2->pci_dev->irq);
1033 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1034 media_pipeline_stop(&q->vdev.entity);
1035 pm_runtime_put(&cio2->pci_dev->dev);
1036 cio2->streaming = false;
1039 static const struct vb2_ops cio2_vb2_ops = {
1040 .buf_init = cio2_vb2_buf_init,
1041 .buf_queue = cio2_vb2_buf_queue,
1042 .buf_cleanup = cio2_vb2_buf_cleanup,
1043 .queue_setup = cio2_vb2_queue_setup,
1044 .start_streaming = cio2_vb2_start_streaming,
1045 .stop_streaming = cio2_vb2_stop_streaming,
1046 .wait_prepare = vb2_ops_wait_prepare,
1047 .wait_finish = vb2_ops_wait_finish,
1050 /**************** V4L2 interface ****************/
1052 static int cio2_v4l2_querycap(struct file *file, void *fh,
1053 struct v4l2_capability *cap)
1055 struct cio2_device *cio2 = video_drvdata(file);
1057 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1058 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1059 snprintf(cap->bus_info, sizeof(cap->bus_info),
1060 "PCI:%s", pci_name(cio2->pci_dev));
1065 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1066 struct v4l2_fmtdesc *f)
1068 if (f->index >= ARRAY_SIZE(formats))
1071 f->pixelformat = formats[f->index].fourcc;
1076 /* The format is validated in cio2_video_link_validate() */
1077 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1079 struct cio2_queue *q = file_to_cio2_queue(file);
1081 f->fmt.pix_mp = q->format;
1086 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1088 const struct ipu3_cio2_fmt *fmt;
1089 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1091 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1095 /* Only supports up to 4224x3136 */
1096 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1097 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1098 if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
1099 mpix->height = CIO2_IMAGE_MAX_LENGTH;
1101 mpix->num_planes = 1;
1102 mpix->pixelformat = fmt->fourcc;
1103 mpix->colorspace = V4L2_COLORSPACE_RAW;
1104 mpix->field = V4L2_FIELD_NONE;
1105 memset(mpix->reserved, 0, sizeof(mpix->reserved));
1106 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1107 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1109 memset(mpix->plane_fmt[0].reserved, 0,
1110 sizeof(mpix->plane_fmt[0].reserved));
1113 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1114 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1115 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1120 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1122 struct cio2_queue *q = file_to_cio2_queue(file);
1124 cio2_v4l2_try_fmt(file, fh, f);
1125 q->format = f->fmt.pix_mp;
1131 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1133 if (input->index > 0)
1136 strscpy(input->name, "camera", sizeof(input->name));
1137 input->type = V4L2_INPUT_TYPE_CAMERA;
1143 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1151 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1153 return input == 0 ? 0 : -EINVAL;
1156 static const struct v4l2_file_operations cio2_v4l2_fops = {
1157 .owner = THIS_MODULE,
1158 .unlocked_ioctl = video_ioctl2,
1159 .open = v4l2_fh_open,
1160 .release = vb2_fop_release,
1161 .poll = vb2_fop_poll,
1162 .mmap = vb2_fop_mmap,
1165 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1166 .vidioc_querycap = cio2_v4l2_querycap,
1167 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1168 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1169 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1170 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1171 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1172 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1173 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1174 .vidioc_querybuf = vb2_ioctl_querybuf,
1175 .vidioc_qbuf = vb2_ioctl_qbuf,
1176 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1177 .vidioc_streamon = vb2_ioctl_streamon,
1178 .vidioc_streamoff = vb2_ioctl_streamoff,
1179 .vidioc_expbuf = vb2_ioctl_expbuf,
1180 .vidioc_enum_input = cio2_video_enum_input,
1181 .vidioc_g_input = cio2_video_g_input,
1182 .vidioc_s_input = cio2_video_s_input,
1185 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1187 struct v4l2_event_subscription *sub)
1189 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1192 /* Line number. For now only zero accepted. */
1196 return v4l2_event_subscribe(fh, sub, 0, NULL);
1199 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1201 struct v4l2_mbus_framefmt *format;
1202 const struct v4l2_mbus_framefmt fmt_default = {
1205 .code = formats[0].mbus_code,
1206 .field = V4L2_FIELD_NONE,
1207 .colorspace = V4L2_COLORSPACE_RAW,
1208 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1209 .quantization = V4L2_QUANTIZATION_DEFAULT,
1210 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1213 /* Initialize try_fmt */
1214 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
1215 *format = fmt_default;
1218 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
1219 *format = fmt_default;
1225 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1226 * @sd : pointer to v4l2 subdev structure
1227 * @cfg: V4L2 subdev pad config
1228 * @fmt: pointer to v4l2 subdev format structure
1229 * return -EINVAL or zero on success
1231 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1232 struct v4l2_subdev_pad_config *cfg,
1233 struct v4l2_subdev_format *fmt)
1235 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1236 struct v4l2_subdev_format format;
1239 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
1240 fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1244 if (fmt->pad == CIO2_PAD_SINK) {
1245 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1246 ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
1251 /* update colorspace etc */
1252 q->subdev_fmt.colorspace = format.format.colorspace;
1253 q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
1254 q->subdev_fmt.quantization = format.format.quantization;
1255 q->subdev_fmt.xfer_func = format.format.xfer_func;
1258 fmt->format = q->subdev_fmt;
1264 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1265 * @sd : pointer to v4l2 subdev structure
1266 * @cfg: V4L2 subdev pad config
1267 * @fmt: pointer to v4l2 subdev format structure
1268 * return -EINVAL or zero on success
1270 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1271 struct v4l2_subdev_pad_config *cfg,
1272 struct v4l2_subdev_format *fmt)
1274 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1277 * Only allow setting sink pad format;
1278 * source always propagates from sink
1280 if (fmt->pad == CIO2_PAD_SOURCE)
1281 return cio2_subdev_get_fmt(sd, cfg, fmt);
1283 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
1284 *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
1286 /* It's the sink, allow changing frame size */
1287 q->subdev_fmt.width = fmt->format.width;
1288 q->subdev_fmt.height = fmt->format.height;
1289 q->subdev_fmt.code = fmt->format.code;
1290 fmt->format = q->subdev_fmt;
1296 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1297 struct v4l2_subdev_pad_config *cfg,
1298 struct v4l2_subdev_mbus_code_enum *code)
1300 if (code->index >= ARRAY_SIZE(formats))
1303 code->code = formats[code->index].mbus_code;
1307 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1308 struct v4l2_subdev_format *fmt)
1310 if (is_media_entity_v4l2_subdev(pad->entity)) {
1311 struct v4l2_subdev *sd =
1312 media_entity_to_v4l2_subdev(pad->entity);
1314 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1315 fmt->pad = pad->index;
1316 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1322 static int cio2_video_link_validate(struct media_link *link)
1324 struct video_device *vd = container_of(link->sink->entity,
1325 struct video_device, entity);
1326 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1327 struct cio2_device *cio2 = video_get_drvdata(vd);
1328 struct v4l2_subdev_format source_fmt;
1331 if (!media_entity_remote_pad(link->sink->entity->pads)) {
1332 dev_info(&cio2->pci_dev->dev,
1333 "video node %s pad not connected\n", vd->name);
1337 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1341 if (source_fmt.format.width != q->format.width ||
1342 source_fmt.format.height != q->format.height) {
1343 dev_err(&cio2->pci_dev->dev,
1344 "Wrong width or height %ux%u (%ux%u expected)\n",
1345 q->format.width, q->format.height,
1346 source_fmt.format.width, source_fmt.format.height);
1350 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1356 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1357 .subscribe_event = cio2_subdev_subscribe_event,
1358 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1361 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1362 .open = cio2_subdev_open,
1365 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1366 .link_validate = v4l2_subdev_link_validate_default,
1367 .get_fmt = cio2_subdev_get_fmt,
1368 .set_fmt = cio2_subdev_set_fmt,
1369 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1372 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1373 .core = &cio2_subdev_core_ops,
1374 .pad = &cio2_subdev_pad_ops,
1377 /******* V4L2 sub-device asynchronous registration callbacks***********/
1379 struct sensor_async_subdev {
1380 struct v4l2_async_subdev asd;
1381 struct csi2_bus_info csi2;
1384 /* The .bound() notifier callback when a match is found */
1385 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1386 struct v4l2_subdev *sd,
1387 struct v4l2_async_subdev *asd)
1389 struct cio2_device *cio2 = container_of(notifier,
1390 struct cio2_device, notifier);
1391 struct sensor_async_subdev *s_asd = container_of(asd,
1392 struct sensor_async_subdev, asd);
1393 struct cio2_queue *q;
1395 if (cio2->queue[s_asd->csi2.port].sensor)
1398 q = &cio2->queue[s_asd->csi2.port];
1400 q->csi2 = s_asd->csi2;
1402 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1407 /* The .unbind callback */
1408 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1409 struct v4l2_subdev *sd,
1410 struct v4l2_async_subdev *asd)
1412 struct cio2_device *cio2 = container_of(notifier,
1413 struct cio2_device, notifier);
1414 struct sensor_async_subdev *s_asd = container_of(asd,
1415 struct sensor_async_subdev, asd);
1417 cio2->queue[s_asd->csi2.port].sensor = NULL;
1420 /* .complete() is called after all subdevices have been located */
1421 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1423 struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
1425 struct sensor_async_subdev *s_asd;
1426 struct v4l2_async_subdev *asd;
1427 struct cio2_queue *q;
1431 list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1432 s_asd = container_of(asd, struct sensor_async_subdev, asd);
1433 q = &cio2->queue[s_asd->csi2.port];
1435 for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1436 if (q->sensor->entity.pads[pad].flags &
1437 MEDIA_PAD_FL_SOURCE)
1440 if (pad == q->sensor->entity.num_pads) {
1441 dev_err(&cio2->pci_dev->dev,
1442 "failed to find src pad for %s\n",
1447 ret = media_create_pad_link(
1448 &q->sensor->entity, pad,
1449 &q->subdev.entity, CIO2_PAD_SINK,
1452 dev_err(&cio2->pci_dev->dev,
1453 "failed to create link for %s\n",
1459 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1462 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1463 .bound = cio2_notifier_bound,
1464 .unbind = cio2_notifier_unbind,
1465 .complete = cio2_notifier_complete,
1468 static int cio2_parse_firmware(struct cio2_device *cio2)
1473 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1474 struct v4l2_fwnode_endpoint vep = {
1475 .bus_type = V4L2_MBUS_CSI2_DPHY
1477 struct sensor_async_subdev *s_asd = NULL;
1478 struct fwnode_handle *ep;
1480 ep = fwnode_graph_get_endpoint_by_id(
1481 dev_fwnode(&cio2->pci_dev->dev), i, 0,
1482 FWNODE_GRAPH_ENDPOINT_NEXT);
1487 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1491 s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
1497 s_asd->csi2.port = vep.base.port;
1498 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1500 ret = v4l2_async_notifier_add_fwnode_remote_subdev(
1501 &cio2->notifier, ep, &s_asd->asd);
1505 fwnode_handle_put(ep);
1510 fwnode_handle_put(ep);
1516 * Proceed even without sensors connected to allow the device to
1519 cio2->notifier.ops = &cio2_async_ops;
1520 ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
1522 dev_err(&cio2->pci_dev->dev,
1523 "failed to register async notifier : %d\n", ret);
1528 /**************** Queue initialization ****************/
1529 static const struct media_entity_operations cio2_media_ops = {
1530 .link_validate = v4l2_subdev_link_validate,
1533 static const struct media_entity_operations cio2_video_entity_ops = {
1534 .link_validate = cio2_video_link_validate,
1537 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1539 static const u32 default_width = 1936;
1540 static const u32 default_height = 1096;
1541 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1543 struct video_device *vdev = &q->vdev;
1544 struct vb2_queue *vbq = &q->vbq;
1545 struct v4l2_subdev *subdev = &q->subdev;
1546 struct v4l2_mbus_framefmt *fmt;
1549 /* Initialize miscellaneous variables */
1550 mutex_init(&q->lock);
1552 /* Initialize formats to default values */
1553 fmt = &q->subdev_fmt;
1554 fmt->width = default_width;
1555 fmt->height = default_height;
1556 fmt->code = dflt_fmt.mbus_code;
1557 fmt->field = V4L2_FIELD_NONE;
1559 q->format.width = default_width;
1560 q->format.height = default_height;
1561 q->format.pixelformat = dflt_fmt.fourcc;
1562 q->format.colorspace = V4L2_COLORSPACE_RAW;
1563 q->format.field = V4L2_FIELD_NONE;
1564 q->format.num_planes = 1;
1565 q->format.plane_fmt[0].bytesperline =
1566 cio2_bytesperline(q->format.width);
1567 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1570 /* Initialize fbpt */
1571 r = cio2_fbpt_init(cio2, q);
1575 /* Initialize media entities */
1576 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1577 MEDIA_PAD_FL_MUST_CONNECT;
1578 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1579 subdev->entity.ops = &cio2_media_ops;
1580 subdev->internal_ops = &cio2_subdev_internal_ops;
1581 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1583 dev_err(&cio2->pci_dev->dev,
1584 "failed initialize subdev media entity (%d)\n", r);
1585 goto fail_subdev_media_entity;
1588 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1589 vdev->entity.ops = &cio2_video_entity_ops;
1590 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1592 dev_err(&cio2->pci_dev->dev,
1593 "failed initialize videodev media entity (%d)\n", r);
1594 goto fail_vdev_media_entity;
1597 /* Initialize subdev */
1598 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1599 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1600 subdev->owner = THIS_MODULE;
1601 snprintf(subdev->name, sizeof(subdev->name),
1602 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1603 subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1604 v4l2_set_subdevdata(subdev, cio2);
1605 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1607 dev_err(&cio2->pci_dev->dev,
1608 "failed initialize subdev (%d)\n", r);
1612 /* Initialize vbq */
1613 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1614 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1615 vbq->ops = &cio2_vb2_ops;
1616 vbq->mem_ops = &vb2_dma_sg_memops;
1617 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1618 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1619 vbq->min_buffers_needed = 1;
1620 vbq->drv_priv = cio2;
1621 vbq->lock = &q->lock;
1622 r = vb2_queue_init(vbq);
1624 dev_err(&cio2->pci_dev->dev,
1625 "failed to initialize videobuf2 queue (%d)\n", r);
1629 /* Initialize vdev */
1630 snprintf(vdev->name, sizeof(vdev->name),
1631 "%s %td", CIO2_NAME, q - cio2->queue);
1632 vdev->release = video_device_release_empty;
1633 vdev->fops = &cio2_v4l2_fops;
1634 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1635 vdev->lock = &cio2->lock;
1636 vdev->v4l2_dev = &cio2->v4l2_dev;
1637 vdev->queue = &q->vbq;
1638 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1639 video_set_drvdata(vdev, cio2);
1640 r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1642 dev_err(&cio2->pci_dev->dev,
1643 "failed to register video device (%d)\n", r);
1647 /* Create link from CIO2 subdev to output node */
1648 r = media_create_pad_link(
1649 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1650 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1657 vb2_video_unregister_device(&q->vdev);
1659 v4l2_device_unregister_subdev(subdev);
1661 media_entity_cleanup(&vdev->entity);
1662 fail_vdev_media_entity:
1663 media_entity_cleanup(&subdev->entity);
1664 fail_subdev_media_entity:
1665 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1667 mutex_destroy(&q->lock);
1672 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1674 vb2_video_unregister_device(&q->vdev);
1675 media_entity_cleanup(&q->vdev.entity);
1676 v4l2_device_unregister_subdev(&q->subdev);
1677 media_entity_cleanup(&q->subdev.entity);
1678 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1679 mutex_destroy(&q->lock);
1682 static int cio2_queues_init(struct cio2_device *cio2)
1686 for (i = 0; i < CIO2_QUEUES; i++) {
1687 r = cio2_queue_init(cio2, &cio2->queue[i]);
1692 if (i == CIO2_QUEUES)
1695 for (i--; i >= 0; i--)
1696 cio2_queue_exit(cio2, &cio2->queue[i]);
1701 static void cio2_queues_exit(struct cio2_device *cio2)
1705 for (i = 0; i < CIO2_QUEUES; i++)
1706 cio2_queue_exit(cio2, &cio2->queue[i]);
1709 /**************** PCI interface ****************/
1711 static int cio2_pci_probe(struct pci_dev *pci_dev,
1712 const struct pci_device_id *id)
1714 struct cio2_device *cio2;
1715 void __iomem *const *iomap;
1718 cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
1721 cio2->pci_dev = pci_dev;
1723 r = pcim_enable_device(pci_dev);
1725 dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
1729 dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
1730 pci_dev->device, pci_dev->revision);
1732 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1734 dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
1738 iomap = pcim_iomap_table(pci_dev);
1740 dev_err(&pci_dev->dev, "failed to iomap table\n");
1744 cio2->base = iomap[CIO2_PCI_BAR];
1746 pci_set_drvdata(pci_dev, cio2);
1748 pci_set_master(pci_dev);
1750 r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
1752 dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
1756 r = pci_enable_msi(pci_dev);
1758 dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
1762 r = cio2_fbpt_init_dummy(cio2);
1766 mutex_init(&cio2->lock);
1768 cio2->media_dev.dev = &cio2->pci_dev->dev;
1769 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1770 sizeof(cio2->media_dev.model));
1771 snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
1772 "PCI:%s", pci_name(cio2->pci_dev));
1773 cio2->media_dev.hw_revision = 0;
1775 media_device_init(&cio2->media_dev);
1776 r = media_device_register(&cio2->media_dev);
1778 goto fail_mutex_destroy;
1780 cio2->v4l2_dev.mdev = &cio2->media_dev;
1781 r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
1783 dev_err(&pci_dev->dev,
1784 "failed to register V4L2 device (%d)\n", r);
1785 goto fail_media_device_unregister;
1788 r = cio2_queues_init(cio2);
1790 goto fail_v4l2_device_unregister;
1792 v4l2_async_notifier_init(&cio2->notifier);
1794 /* Register notifier for subdevices we care */
1795 r = cio2_parse_firmware(cio2);
1797 goto fail_clean_notifier;
1799 r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
1800 IRQF_SHARED, CIO2_NAME, cio2);
1802 dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
1803 goto fail_clean_notifier;
1806 pm_runtime_put_noidle(&pci_dev->dev);
1807 pm_runtime_allow(&pci_dev->dev);
1811 fail_clean_notifier:
1812 v4l2_async_notifier_unregister(&cio2->notifier);
1813 v4l2_async_notifier_cleanup(&cio2->notifier);
1814 cio2_queues_exit(cio2);
1815 fail_v4l2_device_unregister:
1816 v4l2_device_unregister(&cio2->v4l2_dev);
1817 fail_media_device_unregister:
1818 media_device_unregister(&cio2->media_dev);
1819 media_device_cleanup(&cio2->media_dev);
1821 mutex_destroy(&cio2->lock);
1822 cio2_fbpt_exit_dummy(cio2);
1827 static void cio2_pci_remove(struct pci_dev *pci_dev)
1829 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1831 media_device_unregister(&cio2->media_dev);
1832 v4l2_async_notifier_unregister(&cio2->notifier);
1833 v4l2_async_notifier_cleanup(&cio2->notifier);
1834 cio2_queues_exit(cio2);
1835 cio2_fbpt_exit_dummy(cio2);
1836 v4l2_device_unregister(&cio2->v4l2_dev);
1837 media_device_cleanup(&cio2->media_dev);
1838 mutex_destroy(&cio2->lock);
1841 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1843 struct pci_dev *pci_dev = to_pci_dev(dev);
1844 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1845 void __iomem *const base = cio2->base;
1848 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1849 dev_dbg(dev, "cio2 runtime suspend.\n");
1851 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1852 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1853 pm |= CIO2_PMCSR_D3;
1854 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1859 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1861 struct pci_dev *pci_dev = to_pci_dev(dev);
1862 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1863 void __iomem *const base = cio2->base;
1866 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1867 dev_dbg(dev, "cio2 runtime resume.\n");
1869 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1870 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1871 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1877 * Helper function to advance all the elements of a circular buffer by "start"
1880 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1886 { start, elems - 1 },
1889 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1891 /* Loop as long as we have out-of-place entries */
1892 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1896 * Find the number of entries that can be arranged on this
1899 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1901 /* Swap the entries in two parts of the array. */
1902 for (i = 0; i < size0; i++) {
1903 u8 *d = ptr + elem_size * (arr[1].begin + i);
1904 u8 *s = ptr + elem_size * (arr[0].begin + i);
1907 for (j = 0; j < elem_size; j++)
1911 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1912 /* The end of the first array remains unarranged. */
1913 arr[0].begin += size0;
1916 * The first array is fully arranged so we proceed
1917 * handling the next one.
1919 arr[0].begin = arr[1].begin;
1920 arr[0].end = arr[1].begin + size0 - 1;
1921 arr[1].begin += size0;
1926 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1930 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1931 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1935 if (i == CIO2_MAX_BUFFERS)
1939 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1940 CIO2_MAX_BUFFERS, j);
1941 arrange(q->bufs, sizeof(struct cio2_buffer *),
1942 CIO2_MAX_BUFFERS, j);
1946 * DMA clears the valid bit when accessing the buffer.
1947 * When stopping stream in suspend callback, some of the buffers
1948 * may be in invalid state. After resume, when DMA meets the invalid
1949 * buffer, it will halt and stop receiving new data.
1950 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1952 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1953 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1956 static int __maybe_unused cio2_suspend(struct device *dev)
1958 struct pci_dev *pci_dev = to_pci_dev(dev);
1959 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1960 struct cio2_queue *q = cio2->cur_queue;
1962 dev_dbg(dev, "cio2 suspend\n");
1963 if (!cio2->streaming)
1967 cio2_hw_exit(cio2, q);
1968 synchronize_irq(pci_dev->irq);
1970 pm_runtime_force_suspend(dev);
1973 * Upon resume, hw starts to process the fbpt entries from beginning,
1974 * so relocate the queued buffs to the fbpt head before suspend.
1976 cio2_fbpt_rearrange(cio2, q);
1983 static int __maybe_unused cio2_resume(struct device *dev)
1985 struct cio2_device *cio2 = dev_get_drvdata(dev);
1987 struct cio2_queue *q = cio2->cur_queue;
1989 dev_dbg(dev, "cio2 resume\n");
1990 if (!cio2->streaming)
1993 r = pm_runtime_force_resume(&cio2->pci_dev->dev);
1995 dev_err(&cio2->pci_dev->dev,
1996 "failed to set power %d\n", r);
2000 r = cio2_hw_init(cio2, q);
2002 dev_err(dev, "fail to init cio2 hw\n");
2007 static const struct dev_pm_ops cio2_pm_ops = {
2008 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2009 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2012 static const struct pci_device_id cio2_pci_id_table[] = {
2013 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2017 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2019 static struct pci_driver cio2_pci_driver = {
2021 .id_table = cio2_pci_id_table,
2022 .probe = cio2_pci_probe,
2023 .remove = cio2_pci_remove,
2029 module_pci_driver(cio2_pci_driver);
2031 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2032 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2033 MODULE_AUTHOR("Jian Xu Zheng");
2034 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2035 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2036 MODULE_LICENSE("GPL v2");
2037 MODULE_DESCRIPTION("IPU3 CIO2 driver");