1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2017 Intel Corporation
5 * Based partially on Intel IPU4 driver written by
6 * Sakari Ailus <sakari.ailus@linux.intel.com>
7 * Samu Onkalo <samu.onkalo@intel.com>
8 * Jouni Högander <jouni.hogander@intel.com>
9 * Jouni Ukkonen <jouni.ukkonen@intel.com>
10 * Antti Laakso <antti.laakso@intel.com>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <linux/pfn.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/property.h>
22 #include <linux/vmalloc.h>
23 #include <media/v4l2-ctrls.h>
24 #include <media/v4l2-device.h>
25 #include <media/v4l2-event.h>
26 #include <media/v4l2-fwnode.h>
27 #include <media/v4l2-ioctl.h>
28 #include <media/videobuf2-dma-sg.h>
30 #include "ipu3-cio2.h"
32 struct ipu3_cio2_fmt {
39 * These are raw formats used in Intel's third generation of
40 * Image Processing Unit known as IPU3.
41 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
42 * last LSB 6 bits unused.
44 static const struct ipu3_cio2_fmt formats[] = {
45 { /* put default entry at beginning */
46 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
47 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
50 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
51 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
54 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
55 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
58 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
59 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
65 * cio2_find_format - lookup color format by fourcc or/and media bus code
66 * @pixelformat: fourcc to match, ignored if null
67 * @mbus_code: media bus code to match, ignored if null
69 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
74 for (i = 0; i < ARRAY_SIZE(formats); i++) {
75 if (pixelformat && *pixelformat != formats[i].fourcc)
77 if (mbus_code && *mbus_code != formats[i].mbus_code)
86 static inline u32 cio2_bytesperline(const unsigned int width)
89 * 64 bytes for every 50 pixels, the line length
90 * in bytes is multiple of 64 (line end alignment).
92 return DIV_ROUND_UP(width, 50) * 64;
95 /**************** FBPT operations ****************/
97 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
99 if (cio2->dummy_lop) {
100 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
101 cio2->dummy_lop, cio2->dummy_lop_bus_addr);
102 cio2->dummy_lop = NULL;
104 if (cio2->dummy_page) {
105 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
106 cio2->dummy_page, cio2->dummy_page_bus_addr);
107 cio2->dummy_page = NULL;
111 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
115 cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
116 &cio2->dummy_page_bus_addr,
118 cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
119 &cio2->dummy_lop_bus_addr,
121 if (!cio2->dummy_page || !cio2->dummy_lop) {
122 cio2_fbpt_exit_dummy(cio2);
126 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
127 * Initialize each entry to dummy_page bus base address.
129 for (i = 0; i < CIO2_LOP_ENTRIES; i++)
130 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
135 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
136 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
139 * The CPU first initializes some fields in fbpt, then sets
140 * the VALID bit, this barrier is to ensure that the DMA(device)
141 * does not see the VALID bit enabled before other fields are
142 * initialized; otherwise it could lead to havoc.
147 * Request interrupts for start and completion
148 * Valid bit is applicable only to 1st entry
150 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
151 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
154 /* Initialize fpbt entries to point to dummy frame */
155 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
156 struct cio2_fbpt_entry
157 entry[CIO2_MAX_LOPS])
161 entry[0].first_entry.first_page_offset = 0;
162 entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
163 entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
165 for (i = 0; i < CIO2_MAX_LOPS; i++)
166 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
168 cio2_fbpt_entry_enable(cio2, entry);
171 /* Initialize fpbt entries to point to a given buffer */
172 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
173 struct cio2_buffer *b,
174 struct cio2_fbpt_entry
175 entry[CIO2_MAX_LOPS])
177 struct vb2_buffer *vb = &b->vbb.vb2_buf;
178 unsigned int length = vb->planes[0].length;
181 entry[0].first_entry.first_page_offset = b->offset;
182 remaining = length + entry[0].first_entry.first_page_offset;
183 entry[1].second_entry.num_of_pages = PFN_UP(remaining);
185 * last_page_available_bytes has the offset of the last byte in the
186 * last page which is still accessible by DMA. DMA cannot access
187 * beyond this point. Valid range for this is from 0 to 4095.
188 * 0 indicates 1st byte in the page is DMA accessible.
189 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
190 * is available for DMA transfer.
192 entry[1].second_entry.last_page_available_bytes =
193 (remaining & ~PAGE_MASK) ?
194 (remaining & ~PAGE_MASK) - 1 : PAGE_SIZE - 1;
198 while (remaining > 0) {
199 entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
200 remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
206 * The first not meaningful FBPT entry should point to a valid LOP
208 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
210 cio2_fbpt_entry_enable(cio2, entry);
213 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
215 struct device *dev = &cio2->pci_dev->dev;
217 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
225 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
227 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
230 /**************** CSI2 hardware setup ****************/
233 * The CSI2 receiver has several parameters affecting
234 * the receiver timings. These depend on the MIPI bus frequency
235 * F in Hz (sensor transmitter rate) as follows:
236 * register value = (A/1e9 + B * UI) / COUNT_ACC
238 * UI = 1 / (2 * F) in seconds
239 * COUNT_ACC = counter accuracy in seconds
240 * For IPU3 COUNT_ACC = 0.0625
242 * A and B are coefficients from the table below,
243 * depending whether the register minimum or maximum value is
247 * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
248 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
250 * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
251 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
252 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
253 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
254 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
255 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
256 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
257 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
259 * We use the minimum values of both A and B.
263 * shift for keeping value range suitable for 32-bit integer arithmetic
265 #define LIMIT_SHIFT 8
267 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
269 const u32 accinv = 16; /* invert of counter resolution */
270 const u32 uiinv = 500000000; /* 1e9 / 2 */
273 freq >>= LIMIT_SHIFT;
275 if (WARN_ON(freq <= 0 || freq > S32_MAX))
278 * b could be 0, -2 or -8, so |accinv * b| is always
279 * less than (1 << ds) and thus |r| < 500000000.
281 r = accinv * b * (uiinv >> LIMIT_SHIFT);
283 /* max value of a is 95 */
289 /* Calculate the the delay value for termination enable of clock lane HS Rx */
290 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
291 struct cio2_csi2_timing *timing)
293 struct device *dev = &cio2->pci_dev->dev;
294 struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
295 struct v4l2_ctrl *link_freq;
302 link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
304 dev_err(dev, "failed to find LINK_FREQ\n");
308 qm.index = v4l2_ctrl_g_ctrl(link_freq);
309 r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
311 dev_err(dev, "failed to get menu item\n");
316 dev_err(dev, "error invalid link_freq\n");
321 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
322 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
324 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
325 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
326 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
328 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
329 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
330 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
332 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
333 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
334 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
336 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
338 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
339 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
340 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
341 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
346 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
348 static const int NUM_VCS = 4;
349 static const int SID; /* Stream id */
350 static const int ENTRY;
351 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
352 CIO2_FBPT_SUBENTRY_UNIT);
353 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
354 const struct ipu3_cio2_fmt *fmt;
355 void __iomem *const base = cio2->base;
356 u8 lanes, csi2bus = q->csi2.port;
357 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
358 struct cio2_csi2_timing timing;
361 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
365 lanes = q->csi2.lanes;
367 r = cio2_csi2_calc_timing(cio2, q, &timing);
371 writel(timing.clk_termen, q->csi_rx_base +
372 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
373 writel(timing.clk_settle, q->csi_rx_base +
374 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
376 for (i = 0; i < lanes; i++) {
377 writel(timing.dat_termen, q->csi_rx_base +
378 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
379 writel(timing.dat_settle, q->csi_rx_base +
380 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
383 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
384 CIO2_PBM_WMCTRL1_MID1_2CK |
385 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
386 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
387 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
388 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
389 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
390 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
391 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
392 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
393 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
394 CIO2_PBM_ARB_CTRL_LE_EN |
395 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
396 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
397 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
398 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
399 base + CIO2_REG_PBM_ARB_CTRL);
400 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
401 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
402 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
403 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
405 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
406 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
408 /* Configure MIPI backend */
409 for (i = 0; i < NUM_VCS; i++)
410 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
412 /* There are 16 short packet LUT entry */
413 for (i = 0; i < 16; i++)
414 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
415 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
416 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
417 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
419 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
420 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
421 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
422 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
423 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
424 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
426 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
427 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
428 base + CIO2_REG_INT_EN);
430 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
431 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
432 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
433 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
434 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
435 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
436 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
437 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
438 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
439 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
441 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
442 writel(CIO2_CGC_PRIM_TGE |
446 CIO2_CGC_CSI2_INTERFRAME_TGE |
447 CIO2_CGC_CSI2_PORT_DCGE |
452 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
453 CIO2_CGC_CSI_CLKGATE_HOLDOFF
454 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
455 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
456 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
457 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
458 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
459 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
460 base + CIO2_REG_LTRVAL01);
461 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
462 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
463 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
464 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
465 base + CIO2_REG_LTRVAL23);
467 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
468 writel(0, base + CIO2_REG_CDMABA(i));
469 writel(0, base + CIO2_REG_CDMAC0(i));
470 writel(0, base + CIO2_REG_CDMAC1(i));
474 writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
476 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
477 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
478 CIO2_CDMAC0_DMA_INTR_ON_FE |
479 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
481 CIO2_CDMAC0_DMA_INTR_ON_FS |
482 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
484 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
485 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
487 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
489 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
490 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
491 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
492 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
493 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
495 /* Clear interrupts */
496 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
497 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
498 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
499 writel(~0, base + CIO2_REG_INT_STS);
501 /* Enable devices, starting from the last device in the pipe */
502 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
503 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
508 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
510 void __iomem *base = cio2->base;
511 unsigned int i, maxloops = 1000;
513 /* Disable CSI receiver and MIPI backend devices */
514 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
515 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
516 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
517 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
520 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
522 if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
523 CIO2_CDMAC0_DMA_HALTED)
525 usleep_range(1000, 2000);
526 } while (--maxloops);
528 dev_err(&cio2->pci_dev->dev,
529 "DMA %i can not be halted\n", CIO2_DMA_CHAN);
531 for (i = 0; i < CIO2_NUM_PORTS; i++) {
532 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
533 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
534 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
535 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
539 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
541 struct device *dev = &cio2->pci_dev->dev;
542 struct cio2_queue *q = cio2->cur_queue;
543 struct cio2_fbpt_entry *entry;
544 u64 ns = ktime_get_ns();
546 if (dma_chan >= CIO2_QUEUES) {
547 dev_err(dev, "bad DMA channel %i\n", dma_chan);
551 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
552 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
553 dev_warn(&cio2->pci_dev->dev,
554 "no ready buffers found on DMA channel %u\n",
559 /* Find out which buffer(s) are ready */
561 struct cio2_buffer *b;
563 b = q->bufs[q->bufs_first];
565 unsigned int bytes = entry[1].second_entry.num_of_bytes;
567 q->bufs[q->bufs_first] = NULL;
568 atomic_dec(&q->bufs_queued);
569 dev_dbg(&cio2->pci_dev->dev,
570 "buffer %i done\n", b->vbb.vb2_buf.index);
572 b->vbb.vb2_buf.timestamp = ns;
573 b->vbb.field = V4L2_FIELD_NONE;
574 b->vbb.sequence = atomic_read(&q->frame_sequence);
575 if (b->vbb.vb2_buf.planes[0].length != bytes)
576 dev_warn(dev, "buffer length is %d received %d\n",
577 b->vbb.vb2_buf.planes[0].length,
579 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
581 atomic_inc(&q->frame_sequence);
582 cio2_fbpt_entry_init_dummy(cio2, entry);
583 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
584 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
585 } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
588 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
591 * For the user space camera control algorithms it is essential
592 * to know when the reception of a frame has begun. That's often
593 * the best timing information to get from the hardware.
595 struct v4l2_event event = {
596 .type = V4L2_EVENT_FRAME_SYNC,
597 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
600 v4l2_event_queue(q->subdev.devnode, &event);
/*
 * CSI-2 receiver IRQCTRL status bit descriptions; index == bit number,
 * so entries must stay in hardware bit order.
 */
static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"fifo overflow",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"frame sync error",
	"line sync error",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};
/* Per-port error bit descriptions (INT_STS_EXT_IE), in hardware bit order. */
static const char *const cio2_port_errs[] = {
	"ECC recoverable",
	"DPHY not recoverable",
	"ECC not recoverable",
	"CRC error",
	"INTERFRAMEDATA",
	"PKT2SHORT",
	"PKT2LONG",
};
634 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
636 void __iomem *const base = cio2->base;
637 struct device *dev = &cio2->pci_dev->dev;
639 if (int_status & CIO2_INT_IOOE) {
641 * Interrupt on Output Error:
642 * 1) SRAM is full and FS received, or
643 * 2) An invalid bit detected by DMA.
645 u32 oe_status, oe_clear;
647 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
648 oe_status = oe_clear;
650 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
651 dev_err(dev, "DMA output error: 0x%x\n",
652 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
653 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
654 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
656 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
657 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
658 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
659 >> CIO2_INT_EXT_OE_OES_SHIFT);
660 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
662 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
664 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
666 int_status &= ~CIO2_INT_IOOE;
669 if (int_status & CIO2_INT_IOC_MASK) {
670 /* DMA IO done -- frame ready */
674 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
675 if (int_status & CIO2_INT_IOC(d)) {
676 clr |= CIO2_INT_IOC(d);
677 cio2_buffer_done(cio2, d);
682 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
683 /* DMA IO starts or reached specified line */
687 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
688 if (int_status & CIO2_INT_IOS_IOLN(d)) {
689 clr |= CIO2_INT_IOS_IOLN(d);
690 if (d == CIO2_DMA_CHAN)
691 cio2_queue_event_sof(cio2,
697 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
698 /* CSI2 receiver (error) interrupt */
699 u32 ie_status, ie_clear;
702 ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
703 ie_status = ie_clear;
705 for (port = 0; port < CIO2_NUM_PORTS; port++) {
706 u32 port_status = (ie_status >> (port * 8)) & 0xff;
707 u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
708 void __iomem *const csi_rx_base =
709 base + CIO2_REG_PIPE_BASE(port);
712 while (port_status & err_mask) {
713 i = ffs(port_status) - 1;
714 dev_err(dev, "port %i error %s\n",
715 port, cio2_port_errs[i]);
716 ie_status &= ~BIT(port * 8 + i);
717 port_status &= ~BIT(i);
720 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
721 u32 csi2_status, csi2_clear;
723 csi2_status = readl(csi_rx_base +
724 CIO2_REG_IRQCTRL_STATUS);
725 csi2_clear = csi2_status;
727 BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
729 while (csi2_status & err_mask) {
730 i = ffs(csi2_status) - 1;
732 "CSI-2 receiver port %i: %s\n",
733 port, cio2_irq_errs[i]);
734 csi2_status &= ~BIT(i);
738 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
741 "unknown CSI2 error 0x%x on port %i\n",
744 ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
748 writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
750 dev_warn(dev, "unknown interrupt 0x%x on IE\n",
753 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
757 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
760 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
762 struct cio2_device *cio2 = cio2_ptr;
763 void __iomem *const base = cio2->base;
764 struct device *dev = &cio2->pci_dev->dev;
767 int_status = readl(base + CIO2_REG_INT_STS);
768 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
773 writel(int_status, base + CIO2_REG_INT_STS);
774 cio2_irq_handle_once(cio2, int_status);
775 int_status = readl(base + CIO2_REG_INT_STS);
777 dev_dbg(dev, "pending status 0x%x\n", int_status);
778 } while (int_status);
783 /**************** Videobuf2 interface ****************/
785 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
786 enum vb2_buffer_state state)
790 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
792 atomic_dec(&q->bufs_queued);
793 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
799 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
800 unsigned int *num_buffers,
801 unsigned int *num_planes,
802 unsigned int sizes[],
803 struct device *alloc_devs[])
805 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
806 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
809 *num_planes = q->format.num_planes;
811 for (i = 0; i < *num_planes; ++i) {
812 sizes[i] = q->format.plane_fmt[i].sizeimage;
813 alloc_devs[i] = &cio2->pci_dev->dev;
816 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
818 /* Initialize buffer queue */
819 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
821 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
823 atomic_set(&q->bufs_queued, 0);
830 /* Called after each buffer is allocated */
831 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
833 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
834 struct device *dev = &cio2->pci_dev->dev;
835 struct cio2_buffer *b =
836 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
837 unsigned int pages = PFN_UP(vb->planes[0].length);
838 unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
840 struct sg_dma_page_iter sg_iter;
843 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
844 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
845 vb->planes[0].length);
846 return -ENOSPC; /* Should never happen */
849 memset(b->lop, 0, sizeof(b->lop));
850 /* Allocate LOP table */
851 for (i = 0; i < lops; i++) {
852 b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
853 &b->lop_bus_addr[i], GFP_KERNEL);
859 sg = vb2_dma_sg_plane_desc(vb, 0);
863 if (sg->nents && sg->sgl)
864 b->offset = sg->sgl->offset;
867 for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) {
870 b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
872 if (j == CIO2_LOP_ENTRIES) {
878 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
882 dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
886 /* Transfer buffer ownership to cio2 */
887 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
889 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
890 struct cio2_queue *q =
891 container_of(vb->vb2_queue, struct cio2_queue, vbq);
892 struct cio2_buffer *b =
893 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
894 struct cio2_fbpt_entry *entry;
896 unsigned int i, j, next = q->bufs_next;
897 int bufs_queued = atomic_inc_return(&q->bufs_queued);
900 dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
903 * This code queues the buffer to the CIO2 DMA engine, which starts
904 * running once streaming has started. It is possible that this code
905 * gets pre-empted due to increased CPU load. Upon this, the driver
906 * does not get an opportunity to queue new buffers to the CIO2 DMA
907 * engine. When the DMA engine encounters an FBPT entry without the
908 * VALID bit set, the DMA engine halts, which requires a restart of
909 * the DMA engine and sensor, to continue streaming.
910 * This is not desired and is highly unlikely given that there are
911 * 32 FBPT entries that the DMA engine needs to process, to run into
912 * an FBPT entry, without the VALID bit set. We try to mitigate this
913 * by disabling interrupts for the duration of this queueing.
915 local_irq_save(flags);
917 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
918 >> CIO2_CDMARI_FBPT_RP_SHIFT)
919 & CIO2_CDMARI_FBPT_RP_MASK;
922 * fbpt_rp is the fbpt entry that the dma is currently working
923 * on, but since it could jump to next entry at any time,
924 * assume that we might already be there.
926 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
928 if (bufs_queued <= 1 || fbpt_rp == next)
929 /* Buffers were drained */
930 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
932 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
934 * We have allocated CIO2_MAX_BUFFERS circularly for the
935 * hw, the user has requested N buffer queue. The driver
936 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
937 * user queues a buffer, there necessarily is a free buffer.
939 if (!q->bufs[next]) {
941 entry = &q->fbpt[next * CIO2_MAX_LOPS];
942 cio2_fbpt_entry_init_buf(cio2, b, entry);
943 local_irq_restore(flags);
944 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
945 for (j = 0; j < vb->num_planes; j++)
946 vb2_set_plane_payload(vb, j,
947 q->format.plane_fmt[j].sizeimage);
951 dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
952 next = (next + 1) % CIO2_MAX_BUFFERS;
955 local_irq_restore(flags);
956 dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
957 atomic_dec(&q->bufs_queued);
958 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
961 /* Called when each buffer is freed */
962 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
964 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
965 struct cio2_buffer *b =
966 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
970 for (i = 0; i < CIO2_MAX_LOPS; i++) {
972 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
973 b->lop[i], b->lop_bus_addr[i]);
977 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
979 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
980 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
984 atomic_set(&q->frame_sequence, 0);
986 r = pm_runtime_get_sync(&cio2->pci_dev->dev);
988 dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
989 pm_runtime_put_noidle(&cio2->pci_dev->dev);
993 r = media_pipeline_start(&q->vdev.entity, &q->pipe);
997 r = cio2_hw_init(cio2, q);
1001 /* Start streaming on sensor */
1002 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1004 goto fail_csi2_subdev;
1006 cio2->streaming = true;
1011 cio2_hw_exit(cio2, q);
1013 media_pipeline_stop(&q->vdev.entity);
1015 dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
1016 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1017 pm_runtime_put(&cio2->pci_dev->dev);
1022 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1024 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1025 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1027 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1028 dev_err(&cio2->pci_dev->dev,
1029 "failed to stop sensor streaming\n");
1031 cio2_hw_exit(cio2, q);
1032 synchronize_irq(cio2->pci_dev->irq);
1033 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1034 media_pipeline_stop(&q->vdev.entity);
1035 pm_runtime_put(&cio2->pci_dev->dev);
1036 cio2->streaming = false;
1039 static const struct vb2_ops cio2_vb2_ops = {
1040 .buf_init = cio2_vb2_buf_init,
1041 .buf_queue = cio2_vb2_buf_queue,
1042 .buf_cleanup = cio2_vb2_buf_cleanup,
1043 .queue_setup = cio2_vb2_queue_setup,
1044 .start_streaming = cio2_vb2_start_streaming,
1045 .stop_streaming = cio2_vb2_stop_streaming,
1046 .wait_prepare = vb2_ops_wait_prepare,
1047 .wait_finish = vb2_ops_wait_finish,
1050 /**************** V4L2 interface ****************/
1052 static int cio2_v4l2_querycap(struct file *file, void *fh,
1053 struct v4l2_capability *cap)
1055 struct cio2_device *cio2 = video_drvdata(file);
1057 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1058 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1059 snprintf(cap->bus_info, sizeof(cap->bus_info),
1060 "PCI:%s", pci_name(cio2->pci_dev));
1065 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1066 struct v4l2_fmtdesc *f)
1068 if (f->index >= ARRAY_SIZE(formats))
1071 f->pixelformat = formats[f->index].fourcc;
1076 /* The format is validated in cio2_video_link_validate() */
1077 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1079 struct cio2_queue *q = file_to_cio2_queue(file);
1081 f->fmt.pix_mp = q->format;
1086 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1088 const struct ipu3_cio2_fmt *fmt;
1089 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1091 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1095 /* Only supports up to 4224x3136 */
1096 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1097 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1098 if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
1099 mpix->height = CIO2_IMAGE_MAX_LENGTH;
1101 mpix->num_planes = 1;
1102 mpix->pixelformat = fmt->fourcc;
1103 mpix->colorspace = V4L2_COLORSPACE_RAW;
1104 mpix->field = V4L2_FIELD_NONE;
1105 memset(mpix->reserved, 0, sizeof(mpix->reserved));
1106 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1107 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1109 memset(mpix->plane_fmt[0].reserved, 0,
1110 sizeof(mpix->plane_fmt[0].reserved));
1113 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1114 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1115 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1120 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1122 struct cio2_queue *q = file_to_cio2_queue(file);
1124 cio2_v4l2_try_fmt(file, fh, f);
1125 q->format = f->fmt.pix_mp;
1131 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1133 if (input->index > 0)
1136 strscpy(input->name, "camera", sizeof(input->name));
1137 input->type = V4L2_INPUT_TYPE_CAMERA;
/* VIDIOC_G_INPUT: only input 0 exists. */
static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}
1151 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1153 return input == 0 ? 0 : -EINVAL;
1156 static const struct v4l2_file_operations cio2_v4l2_fops = {
1157 .owner = THIS_MODULE,
1158 .unlocked_ioctl = video_ioctl2,
1159 .open = v4l2_fh_open,
1160 .release = vb2_fop_release,
1161 .poll = vb2_fop_poll,
1162 .mmap = vb2_fop_mmap,
1165 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1166 .vidioc_querycap = cio2_v4l2_querycap,
1167 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1168 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1169 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1170 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1171 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1172 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1173 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1174 .vidioc_querybuf = vb2_ioctl_querybuf,
1175 .vidioc_qbuf = vb2_ioctl_qbuf,
1176 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1177 .vidioc_streamon = vb2_ioctl_streamon,
1178 .vidioc_streamoff = vb2_ioctl_streamoff,
1179 .vidioc_expbuf = vb2_ioctl_expbuf,
1180 .vidioc_enum_input = cio2_video_enum_input,
1181 .vidioc_g_input = cio2_video_g_input,
1182 .vidioc_s_input = cio2_video_s_input,
1185 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1187 struct v4l2_event_subscription *sub)
1189 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1192 /* Line number. For now only zero accepted. */
1196 return v4l2_event_subscribe(fh, sub, 0, NULL);
1199 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1201 struct v4l2_mbus_framefmt *format;
1202 const struct v4l2_mbus_framefmt fmt_default = {
1205 .code = formats[0].mbus_code,
1206 .field = V4L2_FIELD_NONE,
1207 .colorspace = V4L2_COLORSPACE_RAW,
1208 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1209 .quantization = V4L2_QUANTIZATION_DEFAULT,
1210 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1213 /* Initialize try_fmt */
1214 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
1215 *format = fmt_default;
1218 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
1219 *format = fmt_default;
1225 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1226 * @sd : pointer to v4l2 subdev structure
1227 * @cfg: V4L2 subdev pad config
1228 * @fmt: pointer to v4l2 subdev format structure
1229 * return -EINVAL or zero on success
1231 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1232 struct v4l2_subdev_pad_config *cfg,
1233 struct v4l2_subdev_format *fmt)
1235 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1236 struct v4l2_subdev_format format;
1239 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
1240 fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1244 if (fmt->pad == CIO2_PAD_SINK) {
1245 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1246 ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
1251 /* update colorspace etc */
1252 q->subdev_fmt.colorspace = format.format.colorspace;
1253 q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
1254 q->subdev_fmt.quantization = format.format.quantization;
1255 q->subdev_fmt.xfer_func = format.format.xfer_func;
1258 fmt->format = q->subdev_fmt;
1264 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1265 * @sd : pointer to v4l2 subdev structure
1266 * @cfg: V4L2 subdev pad config
1267 * @fmt: pointer to v4l2 subdev format structure
1268 * return -EINVAL or zero on success
1270 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1271 struct v4l2_subdev_pad_config *cfg,
1272 struct v4l2_subdev_format *fmt)
1274 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1277 * Only allow setting sink pad format;
1278 * source always propagates from sink
1280 if (fmt->pad == CIO2_PAD_SOURCE)
1281 return cio2_subdev_get_fmt(sd, cfg, fmt);
1283 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
1284 *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
1286 /* It's the sink, allow changing frame size */
1287 q->subdev_fmt.width = fmt->format.width;
1288 q->subdev_fmt.height = fmt->format.height;
1289 q->subdev_fmt.code = fmt->format.code;
1290 fmt->format = q->subdev_fmt;
1296 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1297 struct v4l2_subdev_pad_config *cfg,
1298 struct v4l2_subdev_mbus_code_enum *code)
1300 if (code->index >= ARRAY_SIZE(formats))
1303 code->code = formats[code->index].mbus_code;
1307 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1308 struct v4l2_subdev_format *fmt)
1310 if (is_media_entity_v4l2_subdev(pad->entity)) {
1311 struct v4l2_subdev *sd =
1312 media_entity_to_v4l2_subdev(pad->entity);
1314 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1315 fmt->pad = pad->index;
1316 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1322 static int cio2_video_link_validate(struct media_link *link)
1324 struct video_device *vd = container_of(link->sink->entity,
1325 struct video_device, entity);
1326 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1327 struct cio2_device *cio2 = video_get_drvdata(vd);
1328 struct v4l2_subdev_format source_fmt;
1331 if (!media_entity_remote_pad(link->sink->entity->pads)) {
1332 dev_info(&cio2->pci_dev->dev,
1333 "video node %s pad not connected\n", vd->name);
1337 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1341 if (source_fmt.format.width != q->format.width ||
1342 source_fmt.format.height != q->format.height) {
1343 dev_err(&cio2->pci_dev->dev,
1344 "Wrong width or height %ux%u (%ux%u expected)\n",
1345 q->format.width, q->format.height,
1346 source_fmt.format.width, source_fmt.format.height);
1350 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1356 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1357 .subscribe_event = cio2_subdev_subscribe_event,
1358 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1361 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1362 .open = cio2_subdev_open,
1365 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1366 .link_validate = v4l2_subdev_link_validate_default,
1367 .get_fmt = cio2_subdev_get_fmt,
1368 .set_fmt = cio2_subdev_set_fmt,
1369 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1372 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1373 .core = &cio2_subdev_core_ops,
1374 .pad = &cio2_subdev_pad_ops,
1377 /******* V4L2 sub-device asynchronous registration callbacks***********/
1379 struct sensor_async_subdev {
1380 struct v4l2_async_subdev asd;
1381 struct csi2_bus_info csi2;
1384 /* The .bound() notifier callback when a match is found */
1385 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1386 struct v4l2_subdev *sd,
1387 struct v4l2_async_subdev *asd)
1389 struct cio2_device *cio2 = container_of(notifier,
1390 struct cio2_device, notifier);
1391 struct sensor_async_subdev *s_asd = container_of(asd,
1392 struct sensor_async_subdev, asd);
1393 struct cio2_queue *q;
1395 if (cio2->queue[s_asd->csi2.port].sensor)
1398 q = &cio2->queue[s_asd->csi2.port];
1400 q->csi2 = s_asd->csi2;
1402 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1407 /* The .unbind callback */
1408 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1409 struct v4l2_subdev *sd,
1410 struct v4l2_async_subdev *asd)
1412 struct cio2_device *cio2 = container_of(notifier,
1413 struct cio2_device, notifier);
1414 struct sensor_async_subdev *s_asd = container_of(asd,
1415 struct sensor_async_subdev, asd);
1417 cio2->queue[s_asd->csi2.port].sensor = NULL;
1420 /* .complete() is called after all subdevices have been located */
1421 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1423 struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
1425 struct sensor_async_subdev *s_asd;
1426 struct v4l2_async_subdev *asd;
1427 struct cio2_queue *q;
1431 list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1432 s_asd = container_of(asd, struct sensor_async_subdev, asd);
1433 q = &cio2->queue[s_asd->csi2.port];
1435 for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1436 if (q->sensor->entity.pads[pad].flags &
1437 MEDIA_PAD_FL_SOURCE)
1440 if (pad == q->sensor->entity.num_pads) {
1441 dev_err(&cio2->pci_dev->dev,
1442 "failed to find src pad for %s\n",
1447 ret = media_create_pad_link(
1448 &q->sensor->entity, pad,
1449 &q->subdev.entity, CIO2_PAD_SINK,
1452 dev_err(&cio2->pci_dev->dev,
1453 "failed to create link for %s\n",
1459 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1462 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1463 .bound = cio2_notifier_bound,
1464 .unbind = cio2_notifier_unbind,
1465 .complete = cio2_notifier_complete,
1468 static int cio2_parse_firmware(struct cio2_device *cio2)
1473 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1474 struct v4l2_fwnode_endpoint vep = {
1475 .bus_type = V4L2_MBUS_CSI2_DPHY
1477 struct sensor_async_subdev *s_asd = NULL;
1478 struct fwnode_handle *ep;
1480 ep = fwnode_graph_get_endpoint_by_id(
1481 dev_fwnode(&cio2->pci_dev->dev), i, 0,
1482 FWNODE_GRAPH_ENDPOINT_NEXT);
1487 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1491 s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
1497 s_asd->csi2.port = vep.base.port;
1498 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1500 ret = v4l2_async_notifier_add_fwnode_remote_subdev(
1501 &cio2->notifier, ep, &s_asd->asd);
1505 fwnode_handle_put(ep);
1510 fwnode_handle_put(ep);
1516 * Proceed even without sensors connected to allow the device to
1519 cio2->notifier.ops = &cio2_async_ops;
1520 ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
1522 dev_err(&cio2->pci_dev->dev,
1523 "failed to register async notifier : %d\n", ret);
1528 /**************** Queue initialization ****************/
1529 static const struct media_entity_operations cio2_media_ops = {
1530 .link_validate = v4l2_subdev_link_validate,
1533 static const struct media_entity_operations cio2_video_entity_ops = {
1534 .link_validate = cio2_video_link_validate,
1537 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1539 static const u32 default_width = 1936;
1540 static const u32 default_height = 1096;
1541 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1543 struct video_device *vdev = &q->vdev;
1544 struct vb2_queue *vbq = &q->vbq;
1545 struct v4l2_subdev *subdev = &q->subdev;
1546 struct v4l2_mbus_framefmt *fmt;
1549 /* Initialize miscellaneous variables */
1550 mutex_init(&q->lock);
1552 /* Initialize formats to default values */
1553 fmt = &q->subdev_fmt;
1554 fmt->width = default_width;
1555 fmt->height = default_height;
1556 fmt->code = dflt_fmt.mbus_code;
1557 fmt->field = V4L2_FIELD_NONE;
1559 q->format.width = default_width;
1560 q->format.height = default_height;
1561 q->format.pixelformat = dflt_fmt.fourcc;
1562 q->format.colorspace = V4L2_COLORSPACE_RAW;
1563 q->format.field = V4L2_FIELD_NONE;
1564 q->format.num_planes = 1;
1565 q->format.plane_fmt[0].bytesperline =
1566 cio2_bytesperline(q->format.width);
1567 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1570 /* Initialize fbpt */
1571 r = cio2_fbpt_init(cio2, q);
1575 /* Initialize media entities */
1576 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1577 MEDIA_PAD_FL_MUST_CONNECT;
1578 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1579 subdev->entity.ops = &cio2_media_ops;
1580 subdev->internal_ops = &cio2_subdev_internal_ops;
1581 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1583 dev_err(&cio2->pci_dev->dev,
1584 "failed initialize subdev media entity (%d)\n", r);
1585 goto fail_subdev_media_entity;
1588 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1589 vdev->entity.ops = &cio2_video_entity_ops;
1590 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1592 dev_err(&cio2->pci_dev->dev,
1593 "failed initialize videodev media entity (%d)\n", r);
1594 goto fail_vdev_media_entity;
1597 /* Initialize subdev */
1598 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1599 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1600 subdev->owner = THIS_MODULE;
1601 snprintf(subdev->name, sizeof(subdev->name),
1602 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1603 subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1604 v4l2_set_subdevdata(subdev, cio2);
1605 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1607 dev_err(&cio2->pci_dev->dev,
1608 "failed initialize subdev (%d)\n", r);
1612 /* Initialize vbq */
1613 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1614 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1615 vbq->ops = &cio2_vb2_ops;
1616 vbq->mem_ops = &vb2_dma_sg_memops;
1617 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1618 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1619 vbq->min_buffers_needed = 1;
1620 vbq->drv_priv = cio2;
1621 vbq->lock = &q->lock;
1622 r = vb2_queue_init(vbq);
1624 dev_err(&cio2->pci_dev->dev,
1625 "failed to initialize videobuf2 queue (%d)\n", r);
1629 /* Initialize vdev */
1630 snprintf(vdev->name, sizeof(vdev->name),
1631 "%s %td", CIO2_NAME, q - cio2->queue);
1632 vdev->release = video_device_release_empty;
1633 vdev->fops = &cio2_v4l2_fops;
1634 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1635 vdev->lock = &cio2->lock;
1636 vdev->v4l2_dev = &cio2->v4l2_dev;
1637 vdev->queue = &q->vbq;
1638 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1639 video_set_drvdata(vdev, cio2);
1640 r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1642 dev_err(&cio2->pci_dev->dev,
1643 "failed to register video device (%d)\n", r);
1647 /* Create link from CIO2 subdev to output node */
1648 r = media_create_pad_link(
1649 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1650 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1657 vb2_video_unregister_device(&q->vdev);
1659 v4l2_device_unregister_subdev(subdev);
1661 media_entity_cleanup(&vdev->entity);
1662 fail_vdev_media_entity:
1663 media_entity_cleanup(&subdev->entity);
1664 fail_subdev_media_entity:
1665 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1667 mutex_destroy(&q->lock);
1672 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1674 vb2_video_unregister_device(&q->vdev);
1675 media_entity_cleanup(&q->vdev.entity);
1676 v4l2_device_unregister_subdev(&q->subdev);
1677 media_entity_cleanup(&q->subdev.entity);
1678 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1679 mutex_destroy(&q->lock);
1682 static int cio2_queues_init(struct cio2_device *cio2)
1686 for (i = 0; i < CIO2_QUEUES; i++) {
1687 r = cio2_queue_init(cio2, &cio2->queue[i]);
1692 if (i == CIO2_QUEUES)
1695 for (i--; i >= 0; i--)
1696 cio2_queue_exit(cio2, &cio2->queue[i]);
1701 static void cio2_queues_exit(struct cio2_device *cio2)
1705 for (i = 0; i < CIO2_QUEUES; i++)
1706 cio2_queue_exit(cio2, &cio2->queue[i]);
1709 /**************** PCI interface ****************/
1711 static int cio2_pci_config_setup(struct pci_dev *dev)
1714 int r = pci_enable_msi(dev);
1717 dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
1721 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1722 pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
1723 PCI_COMMAND_INTX_DISABLE;
1724 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1729 static int cio2_pci_probe(struct pci_dev *pci_dev,
1730 const struct pci_device_id *id)
1732 struct cio2_device *cio2;
1733 void __iomem *const *iomap;
1736 cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
1739 cio2->pci_dev = pci_dev;
1741 r = pcim_enable_device(pci_dev);
1743 dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
1747 dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
1748 pci_dev->device, pci_dev->revision);
1750 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1752 dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
1756 iomap = pcim_iomap_table(pci_dev);
1758 dev_err(&pci_dev->dev, "failed to iomap table\n");
1762 cio2->base = iomap[CIO2_PCI_BAR];
1764 pci_set_drvdata(pci_dev, cio2);
1766 pci_set_master(pci_dev);
1768 r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
1770 dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
1774 r = cio2_pci_config_setup(pci_dev);
1778 r = cio2_fbpt_init_dummy(cio2);
1782 mutex_init(&cio2->lock);
1784 cio2->media_dev.dev = &cio2->pci_dev->dev;
1785 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1786 sizeof(cio2->media_dev.model));
1787 snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
1788 "PCI:%s", pci_name(cio2->pci_dev));
1789 cio2->media_dev.hw_revision = 0;
1791 media_device_init(&cio2->media_dev);
1792 r = media_device_register(&cio2->media_dev);
1794 goto fail_mutex_destroy;
1796 cio2->v4l2_dev.mdev = &cio2->media_dev;
1797 r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
1799 dev_err(&pci_dev->dev,
1800 "failed to register V4L2 device (%d)\n", r);
1801 goto fail_media_device_unregister;
1804 r = cio2_queues_init(cio2);
1806 goto fail_v4l2_device_unregister;
1808 v4l2_async_notifier_init(&cio2->notifier);
1810 /* Register notifier for subdevices we care */
1811 r = cio2_parse_firmware(cio2);
1813 goto fail_clean_notifier;
1815 r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
1816 IRQF_SHARED, CIO2_NAME, cio2);
1818 dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
1819 goto fail_clean_notifier;
1822 pm_runtime_put_noidle(&pci_dev->dev);
1823 pm_runtime_allow(&pci_dev->dev);
1827 fail_clean_notifier:
1828 v4l2_async_notifier_unregister(&cio2->notifier);
1829 v4l2_async_notifier_cleanup(&cio2->notifier);
1830 cio2_queues_exit(cio2);
1831 fail_v4l2_device_unregister:
1832 v4l2_device_unregister(&cio2->v4l2_dev);
1833 fail_media_device_unregister:
1834 media_device_unregister(&cio2->media_dev);
1835 media_device_cleanup(&cio2->media_dev);
1837 mutex_destroy(&cio2->lock);
1838 cio2_fbpt_exit_dummy(cio2);
1843 static void cio2_pci_remove(struct pci_dev *pci_dev)
1845 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1847 media_device_unregister(&cio2->media_dev);
1848 v4l2_async_notifier_unregister(&cio2->notifier);
1849 v4l2_async_notifier_cleanup(&cio2->notifier);
1850 cio2_queues_exit(cio2);
1851 cio2_fbpt_exit_dummy(cio2);
1852 v4l2_device_unregister(&cio2->v4l2_dev);
1853 media_device_cleanup(&cio2->media_dev);
1854 mutex_destroy(&cio2->lock);
1857 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1859 struct pci_dev *pci_dev = to_pci_dev(dev);
1860 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1861 void __iomem *const base = cio2->base;
1864 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1865 dev_dbg(dev, "cio2 runtime suspend.\n");
1867 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1868 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1869 pm |= CIO2_PMCSR_D3;
1870 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1875 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1877 struct pci_dev *pci_dev = to_pci_dev(dev);
1878 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1879 void __iomem *const base = cio2->base;
1882 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1883 dev_dbg(dev, "cio2 runtime resume.\n");
1885 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1886 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1887 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1893 * Helper function to advance all the elements of a circular buffer by "start"
1896 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1902 { start, elems - 1 },
1905 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1907 /* Loop as long as we have out-of-place entries */
1908 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1912 * Find the number of entries that can be arranged on this
1915 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1917 /* Swap the entries in two parts of the array. */
1918 for (i = 0; i < size0; i++) {
1919 u8 *d = ptr + elem_size * (arr[1].begin + i);
1920 u8 *s = ptr + elem_size * (arr[0].begin + i);
1923 for (j = 0; j < elem_size; j++)
1927 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1928 /* The end of the first array remains unarranged. */
1929 arr[0].begin += size0;
1932 * The first array is fully arranged so we proceed
1933 * handling the next one.
1935 arr[0].begin = arr[1].begin;
1936 arr[0].end = arr[1].begin + size0 - 1;
1937 arr[1].begin += size0;
1942 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1946 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1947 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1951 if (i == CIO2_MAX_BUFFERS)
1955 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1956 CIO2_MAX_BUFFERS, j);
1957 arrange(q->bufs, sizeof(struct cio2_buffer *),
1958 CIO2_MAX_BUFFERS, j);
1962 * DMA clears the valid bit when accessing the buffer.
1963 * When stopping stream in suspend callback, some of the buffers
1964 * may be in invalid state. After resume, when DMA meets the invalid
1965 * buffer, it will halt and stop receiving new data.
1966 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1968 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1969 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1972 static int __maybe_unused cio2_suspend(struct device *dev)
1974 struct pci_dev *pci_dev = to_pci_dev(dev);
1975 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1976 struct cio2_queue *q = cio2->cur_queue;
1978 dev_dbg(dev, "cio2 suspend\n");
1979 if (!cio2->streaming)
1983 cio2_hw_exit(cio2, q);
1984 synchronize_irq(pci_dev->irq);
1986 pm_runtime_force_suspend(dev);
1989 * Upon resume, hw starts to process the fbpt entries from beginning,
1990 * so relocate the queued buffs to the fbpt head before suspend.
1992 cio2_fbpt_rearrange(cio2, q);
1999 static int __maybe_unused cio2_resume(struct device *dev)
2001 struct cio2_device *cio2 = dev_get_drvdata(dev);
2003 struct cio2_queue *q = cio2->cur_queue;
2005 dev_dbg(dev, "cio2 resume\n");
2006 if (!cio2->streaming)
2009 r = pm_runtime_force_resume(&cio2->pci_dev->dev);
2011 dev_err(&cio2->pci_dev->dev,
2012 "failed to set power %d\n", r);
2016 r = cio2_hw_init(cio2, q);
2018 dev_err(dev, "fail to init cio2 hw\n");
2023 static const struct dev_pm_ops cio2_pm_ops = {
2024 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2025 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2028 static const struct pci_device_id cio2_pci_id_table[] = {
2029 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2033 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2035 static struct pci_driver cio2_pci_driver = {
2037 .id_table = cio2_pci_id_table,
2038 .probe = cio2_pci_probe,
2039 .remove = cio2_pci_remove,
2045 module_pci_driver(cio2_pci_driver);
2047 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2048 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2049 MODULE_AUTHOR("Jian Xu Zheng");
2050 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2051 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2052 MODULE_LICENSE("GPL v2");
2053 MODULE_DESCRIPTION("IPU3 CIO2 driver");