// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017,2020 Intel Corporation
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <sakari.ailus@linux.intel.com>
 *  Samu Onkalo <samu.onkalo@intel.com>
 *  Jouni Högander <jouni.hogander@intel.com>
 *  Jouni Ukkonen <jouni.ukkonen@intel.com>
 *  Antti Laakso <antti.laakso@intel.com>
 * et al.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>

#include <media/ipu-bridge.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>

#include "ipu3-cio2.h"

struct ipu3_cio2_fmt {
	u32 mbus_code;
	u32 fourcc;
	u8 mipicode;
	u8 bpp;
};

/*
 * These are raw formats used in Intel's third generation of
 * Image Processing Unit known as IPU3.
 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
 * last LSB 6 bits unused.
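 * (packing check: 25 pixels x 10 bits = 250 bits, carried in 32 bytes
 * = 256 bits, which leaves the 6 padding bits mentioned above)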
 */
static const struct ipu3_cio2_fmt formats[] = {
	{	/* put default entry at beginning */
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	}, {
		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
		.fourcc		= V4L2_PIX_FMT_IPU3_Y10,
		.mipicode	= 0x2b,
		.bpp		= 10,
	},
};

/*
 * cio2_find_format - look up a color format by fourcc and/or media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
						    const u32 *mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (pixelformat && *pixelformat != formats[i].fourcc)
			continue;
		if (mbus_code && *mbus_code != formats[i].mbus_code)
			continue;

		return &formats[i];
	}

	return NULL;
}

static inline u32 cio2_bytesperline(const unsigned int width)
{
	/*
	 * 64 bytes for every 50 pixels, the line length
	 * in bytes is multiple of 64 (line end alignment).
	 */
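	/* e.g. a full-width 4224-pixel line: DIV_ROUND_UP(4224, 50) * 64 = 5440 bytes */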
	return DIV_ROUND_UP(width, 50) * 64;
}

/**************** FBPT operations ****************/

static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;

	if (cio2->dummy_lop) {
		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
				  cio2->dummy_lop_bus_addr);
		cio2->dummy_lop = NULL;
	}
	if (cio2->dummy_page) {
		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
				  cio2->dummy_page_bus_addr);
		cio2->dummy_page = NULL;
	}
}

static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;
	unsigned int i;

	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
					      &cio2->dummy_page_bus_addr,
					      GFP_KERNEL);
	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
					     &cio2->dummy_lop_bus_addr,
					     GFP_KERNEL);
	if (!cio2->dummy_page || !cio2->dummy_lop) {
		cio2_fbpt_exit_dummy(cio2);
		return -ENOMEM;
	}
	/*
	 * A list of pointers (LOP) holds 1024 32-bit entries, each pointing
	 * to a 4 KiB page. Initialize every entry to the dummy_page bus
	 * base address.
	 */
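	/* One LOP page can thus map 1024 * 4 KiB = 4 MiB of buffer memory. */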
	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);

	return 0;
}

static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in the FBPT and then sets
	 * the VALID bit. This barrier ensures that the DMA (device) does
	 * not see the VALID bit enabled before the other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion.
	 * The valid bit is applicable only to the first entry.
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}

/* Initialize FBPT entries to point to the dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry
				       entry[CIO2_MAX_LOPS])
{
	unsigned int i;

	entry[0].first_entry.first_page_offset = 0;
	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;

	for (i = 0; i < CIO2_MAX_LOPS; i++)
		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

	cio2_fbpt_entry_enable(cio2, entry);
}

/* Initialize FBPT entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{
	struct vb2_buffer *vb = &b->vbb.vb2_buf;
	unsigned int length = vb->planes[0].length;
	int remaining, i;

	entry[0].first_entry.first_page_offset = b->offset;
	remaining = length + entry[0].first_entry.first_page_offset;
	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
	/*
	 * last_page_available_bytes has the offset of the last byte in the
	 * last page which is still accessible by DMA. DMA cannot access
	 * beyond this point. Valid range for this is from 0 to 4095.
	 * 0 indicates 1st byte in the page is DMA accessible.
	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
	 * is available for DMA transfer.
	 */
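	/*
	 * Example: a 5-byte payload starting at page offset 0 needs
	 * num_of_pages = 1 and last_page_available_bytes = 4.
	 */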
	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
	entry[1].second_entry.last_page_available_bytes = remaining - 1;
	/* Fill FBPT */
	remaining = length;
	i = 0;
	while (remaining > 0) {
		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
		entry++;
		i++;
	}

	/*
	 * The first unused FBPT entry must still point to a valid LOP.
	 */
	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);

	cio2_fbpt_entry_enable(cio2, entry);
}

static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;

	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
				     GFP_KERNEL);
	if (!q->fbpt)
		return -ENOMEM;

	return 0;
}

static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}

/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *     UI = 1 / (2 * F) in seconds
 *     COUNT_ACC = counter accuracy in seconds
 *     For IPU3 COUNT_ACC = 0.0625
 *
 * A and B are coefficients from the table below,
 * depending whether the register minimum or maximum value is
 * calculated.
 *                                     Minimum     Maximum
 * Clock lane                          A     B     A     B
 * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
 * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
 * Data lanes
 * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
 *
 * We use the minimum values of both A and B.
 */
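
/*
 * Worked example (illustrative link frequency, not from the hardware table):
 * at F = 400 MHz, UI = 1 / (2 * F) = 1.25 ns, so the minimum clock lane
 * settle count is (95 - 8 * 1.25) / 0.0625 = 1360 (working in nanoseconds),
 * which matches what cio2_rx_timing(95, -8, freq, ...) computes below.
 */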

/*
 * shift for keeping value range suitable for 32-bit integer arithmetic
 */
#define LIMIT_SHIFT	8

static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16;		/* inverse of the counter resolution */
	const u32 uiinv = 500000000;	/* 1e9 / 2 */
	s32 r;

	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;
	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << ds) and thus |r| < 500000000.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	/* max value of a is 95 */
	r += accinv * a;

	return r;
};

/* Calculate the delay value for termination enable of clock lane HS Rx */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
				 struct cio2_csi2_timing *timing,
				 unsigned int bpp, unsigned int lanes)
{
	struct device *dev = &cio2->pci_dev->dev;
	s64 freq;

	if (!q->sensor)
		return -ENODEV;

	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
	if (freq < 0) {
		dev_err(dev, "error %lld, invalid link_freq\n", freq);
		return freq;
	}

	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

	return 0;
};

static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const int NUM_VCS = 4;
	static const int SID;	/* Stream id */
	static const int ENTRY;
	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
						   CIO2_FBPT_SUBENTRY_UNIT);
	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
	const struct ipu3_cio2_fmt *fmt;
	void __iomem *const base = cio2->base;
	u8 lanes, csi2bus = q->csi2.port;
	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
	struct cio2_csi2_timing timing = { 0 };
	int i, r;

	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
	if (!fmt)
		return -EINVAL;

	lanes = q->csi2.lanes;

	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
	if (r)
		return r;

	writel(timing.clk_termen, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
	writel(timing.clk_settle, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

	for (i = 0; i < lanes; i++) {
		writel(timing.dat_termen, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
		writel(timing.dat_settle, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
	}

	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
	       CIO2_PBM_WMCTRL1_MID1_2CK |
	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
	       CIO2_PBM_ARB_CTRL_LE_EN |
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
	       base + CIO2_REG_PBM_ARB_CTRL);
	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

	/* Configure MIPI backend */
	for (i = 0; i < NUM_VCS; i++)
		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

	/* There are 16 short packet LUT entries */
	for (i = 0; i < 16; i++)
		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);

	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
	       base + CIO2_REG_INT_EN);

	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
	writel(CIO2_CGC_PRIM_TGE |
	       CIO2_CGC_SIDE_TGE |
	       CIO2_CGC_XOSC_TGE |
	       CIO2_CGC_D3I3_TGE |
	       CIO2_CGC_CSI2_INTERFRAME_TGE |
	       CIO2_CGC_CSI2_PORT_DCGE |
	       CIO2_CGC_SIDE_DCGE |
	       CIO2_CGC_PRIM_DCGE |
	       CIO2_CGC_ROSC_DCGE |
	       CIO2_CGC_XOSC_DCGE |
	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL01);
	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL23);

	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
		writel(0, base + CIO2_REG_CDMABA(i));
		writel(0, base + CIO2_REG_CDMAC0(i));
		writel(0, base + CIO2_REG_CDMAC1(i));
	}

	/* Enable DMA */
	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
	       CIO2_CDMAC0_DMA_INTR_ON_FE |
	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
	       CIO2_CDMAC0_DMA_EN |
	       CIO2_CDMAC0_DMA_INTR_ON_FS |
	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

	/* Clear interrupts */
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
	writel(~0, base + CIO2_REG_INT_STS);

	/* Enable devices, starting from the last device in the pipe */
	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

	return 0;
}

static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;
	void __iomem *const base = cio2->base;
	unsigned int i;
	u32 value;
	int ret;

	/* Disable CSI receiver and MIPI backend devices */
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

	/* Halt DMA */
	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
				 value, value & CIO2_CDMAC0_DMA_HALTED,
				 4000, 2000000);
	if (ret)
		dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
	}
}

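/*
 * Hand buffers the DMA has completed back to vb2 and re-point their FBPT
 * entries at the dummy frame.
 */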
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = cio2->cur_queue;
	struct cio2_fbpt_entry *entry;
	u64 ns = ktime_get_ns();

	if (dma_chan >= CIO2_QUEUES) {
		dev_err(dev, "bad DMA channel %i\n", dma_chan);
		return;
	}

	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
		dev_warn(dev, "no ready buffers found on DMA channel %u\n",
			 dma_chan);
		return;
	}

	/* Find out which buffer(s) are ready */
	do {
		struct cio2_buffer *b;

		b = q->bufs[q->bufs_first];
		if (b) {
			unsigned int received = entry[1].second_entry.num_of_bytes;
			unsigned long payload =
				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);

			q->bufs[q->bufs_first] = NULL;
			atomic_dec(&q->bufs_queued);
			dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);

			b->vbb.vb2_buf.timestamp = ns;
			b->vbb.field = V4L2_FIELD_NONE;
			b->vbb.sequence = atomic_read(&q->frame_sequence);
			if (payload != received)
				dev_warn(dev,
					 "payload length is %lu, received %u\n",
					 payload, received);
			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
		}
		atomic_inc(&q->frame_sequence);
		cio2_fbpt_entry_init_dummy(cio2, entry);
		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
}

static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
	/*
	 * For the user space camera control algorithms it is essential
	 * to know when the reception of a frame has begun. That's often
	 * the best timing information to get from the hardware.
	 */
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
	};

	v4l2_event_queue(q->subdev.devnode, &event);
}

static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"fifo overflow",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"frame sync error",
	"line sync error",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
};

static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
{
	unsigned long csi2_status = status;
	unsigned int i;

	for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
		dev_err(dev, "CSI-2 receiver port %i: %s\n",
			port, cio2_irq_errs[i]);

	if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
		dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
			 csi2_status, port);
}

static const char *const cio2_port_errs[] = {
	"ECC recoverable",
	"DPHY not recoverable",
	"ECC not recoverable",
	"CRC error",
	"INTERFRAMEDATA",
	"PKT2SHORT",
	"PKT2LONG",
};

static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
{
	unsigned long port_status = status;
	unsigned int i;

	for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
		dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
}

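/*
 * Process one snapshot of INT_STS: output errors, completed frames,
 * start-of-frame events and CSI-2 receiver errors.
 */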
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
	struct device *dev = &cio2->pci_dev->dev;
	void __iomem *const base = cio2->base;

	if (int_status & CIO2_INT_IOOE) {
		/*
		 * Interrupt on Output Error:
		 * 1) SRAM is full and FS received, or
		 * 2) An invalid bit detected by DMA.
		 */
		u32 oe_status, oe_clear;

		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
		oe_status = oe_clear;

		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
			dev_err(dev, "DMA output error: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
		}
		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
				>> CIO2_INT_EXT_OE_OES_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
		}
		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
		if (oe_status)
			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
				 oe_status);
		int_status &= ~CIO2_INT_IOOE;
	}

	if (int_status & CIO2_INT_IOC_MASK) {
		/* DMA IO done -- frame ready */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOC(d)) {
				clr |= CIO2_INT_IOC(d);
				cio2_buffer_done(cio2, d);
			}
		int_status &= ~clr;
	}

	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
		/* DMA IO starts or reached specified line */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOS_IOLN(d)) {
				clr |= CIO2_INT_IOS_IOLN(d);
				if (d == CIO2_DMA_CHAN)
					cio2_queue_event_sof(cio2,
							     cio2->cur_queue);
			}
		int_status &= ~clr;
	}

	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
		/* CSI2 receiver (error) interrupt */
		unsigned int port;
		u32 ie_status;

		ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);

		for (port = 0; port < CIO2_NUM_PORTS; port++) {
			u32 port_status = (ie_status >> (port * 8)) & 0xff;

			cio2_irq_log_port_errs(dev, port, port_status);

			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
				void __iomem *csi_rx_base =
						base + CIO2_REG_PIPE_BASE(port);
				u32 csi2_status;

				csi2_status = readl(csi_rx_base +
						CIO2_REG_IRQCTRL_STATUS);

				cio2_irq_log_irq_errs(dev, port, csi2_status);

				writel(csi2_status,
				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
			}
		}

		writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);

		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
	}

	if (int_status)
		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}

static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
	struct cio2_device *cio2 = cio2_ptr;
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;
	u32 int_status;

	int_status = readl(base + CIO2_REG_INT_STS);
	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
	if (!int_status)
		return IRQ_NONE;

	do {
		writel(int_status, base + CIO2_REG_INT_STS);
		cio2_irq_handle_once(cio2, int_status);
		int_status = readl(base + CIO2_REG_INT_STS);
		if (int_status)
			dev_dbg(dev, "pending status 0x%x\n", int_status);
	} while (int_status);

	return IRQ_HANDLED;
}

/**************** Videobuf2 interface ****************/

static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
					enum vb2_buffer_state state)
{
	unsigned int i;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		if (q->bufs[i]) {
			atomic_dec(&q->bufs_queued);
			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
					state);
			q->bufs[i] = NULL;
		}
	}
}

static int cio2_vb2_queue_setup(struct vb2_queue *vq,
				unsigned int *num_buffers,
				unsigned int *num_planes,
				unsigned int sizes[],
				struct device *alloc_devs[])
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	unsigned int i;

	if (*num_planes && *num_planes < q->format.num_planes)
		return -EINVAL;

	for (i = 0; i < q->format.num_planes; ++i) {
		if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
			return -EINVAL;
		sizes[i] = q->format.plane_fmt[i].sizeimage;
		alloc_devs[i] = dev;
	}

	*num_planes = q->format.num_planes;
	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);

	/* Initialize buffer queue */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		q->bufs[i] = NULL;
		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
	}
	atomic_set(&q->bufs_queued, 0);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}

/* Called after each buffer is allocated */
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_buffer *b = to_cio2_buffer(vb);
	unsigned int pages = PFN_UP(vb->planes[0].length);
	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
	struct sg_table *sg;
	struct sg_dma_page_iter sg_iter;
	unsigned int i, j;

	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
			vb->planes[0].length);
		return -ENOSPC;		/* Should never happen */
	}

	memset(b->lop, 0, sizeof(b->lop));
	/* Allocate LOP table */
	for (i = 0; i < lops; i++) {
		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
					       &b->lop_bus_addr[i], GFP_KERNEL);
		if (!b->lop[i])
			goto fail;
	}

	/* Fill LOP */
	sg = vb2_dma_sg_plane_desc(vb, 0);
	if (!sg)
		return -ENOMEM;

	if (sg->nents && sg->sgl)
		b->offset = sg->sgl->offset;

	i = j = 0;
	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
		if (!pages--)
			break;
		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
		j++;
		if (j == CIO2_LOP_ENTRIES) {
			i++;
			j = 0;
		}
	}

	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
	return 0;
fail:
	while (i--)
		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
	return -ENOMEM;
}

/* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_queue *q =
		container_of(vb->vb2_queue, struct cio2_queue, vbq);
	struct cio2_buffer *b = to_cio2_buffer(vb);
	struct cio2_fbpt_entry *entry;
	unsigned long flags;
	unsigned int i, j, next = q->bufs_next;
	int bufs_queued = atomic_inc_return(&q->bufs_queued);
	u32 fbpt_rp;

	dev_dbg(dev, "queue buffer %d\n", vb->index);

	/*
	 * This code queues the buffer to the CIO2 DMA engine, which starts
	 * running once streaming has started. It is possible that this code
	 * gets pre-empted due to increased CPU load. When that happens, the
	 * driver does not get an opportunity to queue new buffers to the
	 * CIO2 DMA engine. When the DMA engine encounters an FBPT entry
	 * without the VALID bit set, it halts, and restarting the DMA engine
	 * and the sensor is then required to continue streaming.
	 * This is not desired and is highly unlikely given that there are
	 * 32 FBPT entries that the DMA engine needs to process before it can
	 * run into an FBPT entry without the VALID bit set. We try to
	 * mitigate this by disabling interrupts for the duration of this
	 * queueing.
	 */
	local_irq_save(flags);

	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
		   & CIO2_CDMARI_FBPT_RP_MASK;

	/*
	 * fbpt_rp is the fbpt entry that the dma is currently working
	 * on, but since it could jump to next entry at any time,
	 * assume that we might already be there.
	 */
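	/* i.e. if the read pointer reports entry N, treat entry N + 1 as potentially claimed already */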
	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	if (bufs_queued <= 1 || fbpt_rp == next)
		/* Buffers were drained */
		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		/*
		 * CIO2_MAX_BUFFERS entries are allocated circularly for the
		 * hardware while the user has requested a queue of N buffers.
		 * The driver ensures N <= CIO2_MAX_BUFFERS and guarantees
		 * that whenever the user queues a buffer, there necessarily
		 * is a free slot.
		 */
		if (!q->bufs[next]) {
			q->bufs[next] = b;
			entry = &q->fbpt[next * CIO2_MAX_LOPS];
			cio2_fbpt_entry_init_buf(cio2, b, entry);
			local_irq_restore(flags);
			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
			for (j = 0; j < vb->num_planes; j++)
				vb2_set_plane_payload(vb, j,
					q->format.plane_fmt[j].sizeimage);
			return;
		}

		dev_dbg(dev, "entry %i was full!\n", next);
		next = (next + 1) % CIO2_MAX_BUFFERS;
	}

	local_irq_restore(flags);
	dev_err(dev, "error: all cio2 entries were full!\n");
	atomic_dec(&q->bufs_queued);
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}

/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = &cio2->pci_dev->dev;
	struct cio2_buffer *b = to_cio2_buffer(vb);
	unsigned int i;

	/* Free LOP table */
	for (i = 0; i < CIO2_MAX_LOPS; i++) {
		if (b->lop[i])
			dma_free_coherent(dev, PAGE_SIZE,
					  b->lop[i], b->lop_bus_addr[i]);
	}
}

static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct device *dev = &cio2->pci_dev->dev;
	int r;

	cio2->cur_queue = q;
	atomic_set(&q->frame_sequence, 0);

	r = pm_runtime_resume_and_get(dev);
	if (r < 0) {
		dev_info(dev, "failed to set power %d\n", r);
		return r;
	}

	r = video_device_pipeline_start(&q->vdev, &q->pipe);
	if (r)
		goto fail_pipeline;

	r = cio2_hw_init(cio2, q);
	if (r)
		goto fail_hw;

	/* Start streaming on sensor */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r)
		goto fail_csi2_subdev;

	cio2->streaming = true;

	return 0;

fail_csi2_subdev:
	cio2_hw_exit(cio2, q);
fail_hw:
	video_device_pipeline_stop(&q->vdev);
fail_pipeline:
	dev_dbg(dev, "failed to start streaming (%d)\n", r);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(dev);

	return r;
}

static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct device *dev = &cio2->pci_dev->dev;

	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
		dev_err(dev, "failed to stop sensor streaming\n");

	cio2_hw_exit(cio2, q);
	synchronize_irq(cio2->pci_dev->irq);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
	video_device_pipeline_stop(&q->vdev);
	pm_runtime_put(dev);
	cio2->streaming = false;
}

static const struct vb2_ops cio2_vb2_ops = {
	.buf_init = cio2_vb2_buf_init,
	.buf_queue = cio2_vb2_buf_queue,
	.buf_cleanup = cio2_vb2_buf_cleanup,
	.queue_setup = cio2_vb2_queue_setup,
	.start_streaming = cio2_vb2_start_streaming,
	.stop_streaming = cio2_vb2_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/**************** V4L2 interface ****************/

static int cio2_v4l2_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{
	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));

	return 0;
}

static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
			      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].fourcc;

	return 0;
}

/* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	f->fmt.pix_mp = q->format;

	return 0;
}

static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	const struct ipu3_cio2_fmt *fmt;
	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;

	fmt = cio2_find_format(&mpix->pixelformat, NULL);
	if (!fmt)
		fmt = &formats[0];

	/* Only supports up to 4224x3136 */
	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
		mpix->width = CIO2_IMAGE_MAX_WIDTH;
	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
		mpix->height = CIO2_IMAGE_MAX_HEIGHT;

	mpix->num_planes = 1;
	mpix->pixelformat = fmt->fourcc;
	mpix->colorspace = V4L2_COLORSPACE_RAW;
	mpix->field = V4L2_FIELD_NONE;
	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
				       mpix->height;

	/* use default */
	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;

	return 0;
}

static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	cio2_v4l2_try_fmt(file, fh, f);
	q->format = f->fmt.pix_mp;

	return 0;
}

static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strscpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_file_operations cio2_v4l2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
	.vidioc_querycap = cio2_v4l2_querycap,
	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_enum_input = cio2_video_enum_input,
	.vidioc_g_input = cio2_video_g_input,
	.vidioc_s_input = cio2_video_s_input,
};

static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
				       struct v4l2_fh *fh,
				       struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_FRAME_SYNC)
		return -EINVAL;

	/* Line number. For now only zero accepted. */
	if (sub->id != 0)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, 0, NULL);
}

static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_mbus_framefmt *format;
	const struct v4l2_mbus_framefmt fmt_default = {
		.width = 1936,
		.height = 1096,
		.code = formats[0].mbus_code,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_RAW,
		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
		.quantization = V4L2_QUANTIZATION_DEFAULT,
		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
	};

	/* Initialize try_fmt */
	format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SINK);
	*format = fmt_default;

	/* same as sink */
	format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SOURCE);
	*format = fmt_default;

	return 0;
}

static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);

	mutex_lock(&q->subdev_lock);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		fmt->format = *v4l2_subdev_state_get_format(sd_state,
							    fmt->pad);
	else
		fmt->format = q->subdev_fmt;

	mutex_unlock(&q->subdev_lock);

	return 0;
}

static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{
	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
	struct v4l2_mbus_framefmt *mbus;
	u32 mbus_code = fmt->format.code;
	unsigned int i;

	/*
	 * Only allow setting sink pad format;
	 * source always propagates from sink
	 */
	if (fmt->pad == CIO2_PAD_SOURCE)
		return cio2_subdev_get_fmt(sd, sd_state, fmt);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		mbus = v4l2_subdev_state_get_format(sd_state, fmt->pad);
	else
		mbus = &q->subdev_fmt;

	fmt->format.code = formats[0].mbus_code;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].mbus_code == mbus_code) {
			fmt->format.code = mbus_code;
			break;
		}
	}

	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
	fmt->format.field = V4L2_FIELD_NONE;

	mutex_lock(&q->subdev_lock);
	*mbus = fmt->format;
	mutex_unlock(&q->subdev_lock);

	return 0;
}

static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
				      struct v4l2_subdev_state *sd_state,
				      struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code->code = formats[code->index].mbus_code;
	return 0;
}

static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
						struct v4l2_subdev_format *fmt)
{
	if (is_media_entity_v4l2_subdev(pad->entity)) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		memset(fmt, 0, sizeof(*fmt));
		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
	}

	return -EINVAL;
}

static int cio2_video_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vd = media_entity_to_video_device(entity);
	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
	struct cio2_device *cio2 = video_get_drvdata(vd);
	struct device *dev = &cio2->pci_dev->dev;
	struct v4l2_subdev_format source_fmt;
	int ret;

	if (!media_pad_remote_pad_first(entity->pads)) {
		dev_info(dev, "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != q->format.width ||
	    source_fmt.format.height != q->format.height) {
		dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
			q->format.width, q->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
		return -EINVAL;

	return 0;
}

static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
	.subscribe_event = cio2_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
	.open = cio2_subdev_open,
};

static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
	.get_fmt = cio2_subdev_get_fmt,
	.set_fmt = cio2_subdev_set_fmt,
	.enum_mbus_code = cio2_subdev_enum_mbus_code,
};

static const struct v4l2_subdev_ops cio2_subdev_ops = {
	.core = &cio2_subdev_core_ops,
	.pad = &cio2_subdev_pad_ops,
};

/******* V4L2 sub-device asynchronous registration callbacks***********/

struct sensor_async_subdev {
	struct v4l2_async_connection asd;
	struct csi2_bus_info csi2;
};

#define to_sensor_asd(__asd)	\
	container_of_const(__asd, struct sensor_async_subdev, asd)

/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_connection *asd)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
	struct cio2_queue *q;
	int ret;

	if (cio2->queue[s_asd->csi2.port].sensor)
		return -EBUSY;

	ret = ipu_bridge_instantiate_vcm(sd->dev);
	if (ret)
		return ret;

	q = &cio2->queue[s_asd->csi2.port];

	q->csi2 = s_asd->csi2;
	q->sensor = sd;
	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);

	return 0;
}

/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_connection *asd)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);

	cio2->queue[s_asd->csi2.port].sensor = NULL;
}

/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct cio2_device *cio2 = to_cio2_device(notifier);
	struct device *dev = &cio2->pci_dev->dev;
	struct sensor_async_subdev *s_asd;
	struct v4l2_async_connection *asd;
	struct cio2_queue *q;
	int ret;

	list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
		s_asd = to_sensor_asd(asd);
		q = &cio2->queue[s_asd->csi2.port];

		ret = media_entity_get_fwnode_pad(&q->sensor->entity,
						  s_asd->asd.match.fwnode,
						  MEDIA_PAD_FL_SOURCE);
		if (ret < 0) {
			dev_err(dev, "no pad for endpoint %pfw (%d)\n",
				s_asd->asd.match.fwnode, ret);
			return ret;
		}

		ret = media_create_pad_link(&q->sensor->entity, ret,
					    &q->subdev.entity, CIO2_PAD_SINK,
					    0);
		if (ret) {
			dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
				q->sensor->name, s_asd->asd.match.fwnode, ret);
			return ret;
		}
	}

	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
}

static const struct v4l2_async_notifier_operations cio2_async_ops = {
	.bound = cio2_notifier_bound,
	.unbind = cio2_notifier_unbind,
	.complete = cio2_notifier_complete,
};

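/*
 * Walk the firmware graph: one endpoint per CSI-2 port, each parsed as a
 * D-PHY connection and added to the async notifier.
 */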
static int cio2_parse_firmware(struct cio2_device *cio2)
{
	struct device *dev = &cio2->pci_dev->dev;
	unsigned int i;
	int ret;

	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		struct v4l2_fwnode_endpoint vep = {
			.bus_type = V4L2_MBUS_CSI2_DPHY
		};
		struct sensor_async_subdev *s_asd;
		struct fwnode_handle *ep;

		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
						FWNODE_GRAPH_ENDPOINT_NEXT);
		if (!ep)
			continue;

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
		if (ret)
			goto err_parse;

		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
							struct
							sensor_async_subdev);
		if (IS_ERR(s_asd)) {
			ret = PTR_ERR(s_asd);
			goto err_parse;
		}

		s_asd->csi2.port = vep.base.port;
		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;

		fwnode_handle_put(ep);

		continue;

err_parse:
		fwnode_handle_put(ep);
		return ret;
	}

	/*
	 * Proceed even without sensors connected to allow the device to
	 * suspend.
	 */
	cio2->notifier.ops = &cio2_async_ops;
	ret = v4l2_async_nf_register(&cio2->notifier);
	if (ret)
		dev_err(dev, "failed to register async notifier : %d\n", ret);

	return ret;
}

/**************** Queue initialization ****************/
static const struct media_entity_operations cio2_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

static const struct media_entity_operations cio2_video_entity_ops = {
	.link_validate = cio2_video_link_validate,
};

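/*
 * Set up one port's capture path: default formats, FBPT, media entities,
 * the CSI-2 subdev, the vb2 queue and the video node.
 */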
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const u32 default_width = 1936;
	static const u32 default_height = 1096;
	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
	struct device *dev = &cio2->pci_dev->dev;
	struct video_device *vdev = &q->vdev;
	struct vb2_queue *vbq = &q->vbq;
	struct v4l2_subdev *subdev = &q->subdev;
	struct v4l2_mbus_framefmt *fmt;
	int r;

	/* Initialize miscellaneous variables */
	mutex_init(&q->lock);
	mutex_init(&q->subdev_lock);

	/* Initialize formats to default values */
	fmt = &q->subdev_fmt;
	fmt->width = default_width;
	fmt->height = default_height;
	fmt->code = dflt_fmt.mbus_code;
	fmt->field = V4L2_FIELD_NONE;

	q->format.width = default_width;
	q->format.height = default_height;
	q->format.pixelformat = dflt_fmt.fourcc;
	q->format.colorspace = V4L2_COLORSPACE_RAW;
	q->format.field = V4L2_FIELD_NONE;
	q->format.num_planes = 1;
	q->format.plane_fmt[0].bytesperline =
				cio2_bytesperline(q->format.width);
	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
					   q->format.height;

	/* Initialize fbpt */
	r = cio2_fbpt_init(cio2, q);
	if (r)
		goto fail_fbpt;

	/* Initialize media entities */
	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
		MEDIA_PAD_FL_MUST_CONNECT;
	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	subdev->entity.ops = &cio2_media_ops;
	subdev->internal_ops = &cio2_subdev_internal_ops;
	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
	if (r) {
		dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
		goto fail_subdev_media_entity;
	}

	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	vdev->entity.ops = &cio2_video_entity_ops;
	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
	if (r) {
		dev_err(dev, "failed initialize videodev media entity (%d)\n",
			r);
		goto fail_vdev_media_entity;
	}

	/* Initialize subdev */
	v4l2_subdev_init(subdev, &cio2_subdev_ops);
	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->owner = THIS_MODULE;
	snprintf(subdev->name, sizeof(subdev->name),
		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	v4l2_set_subdevdata(subdev, cio2);
	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
	if (r) {
		dev_err(dev, "failed initialize subdev (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vbq->min_queued_buffers = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
	if (r) {
		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (r) {
		dev_err(dev, "failed to register video device (%d)\n", r);
		goto fail_vdev;
	}

	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	if (r)
		goto fail_link;

	return 0;

fail_link:
	vb2_video_unregister_device(&q->vdev);
fail_vdev:
	v4l2_device_unregister_subdev(subdev);
fail_subdev:
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, dev);
fail_fbpt:
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);

	return r;
}

static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	vb2_video_unregister_device(&q->vdev);
	media_entity_cleanup(&q->vdev.entity);
	v4l2_device_unregister_subdev(&q->subdev);
	media_entity_cleanup(&q->subdev.entity);
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);
}

static int cio2_queues_init(struct cio2_device *cio2)
{
	int i, r;

	for (i = 0; i < CIO2_QUEUES; i++) {
		r = cio2_queue_init(cio2, &cio2->queue[i]);
		if (r)
			break;
	}

	if (i == CIO2_QUEUES)
		return 0;

	for (i--; i >= 0; i--)
		cio2_queue_exit(cio2, &cio2->queue[i]);

	return r;
}

static void cio2_queues_exit(struct cio2_device *cio2)
{
	unsigned int i;

	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
}

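/*
 * Check whether the fwnode graph (or its software-node secondary) exposes
 * at least one endpoint.
 */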
cio2_check_fwnode_graph(struct fwnode_handle * fwnode)1682 static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1683 {
1684 struct fwnode_handle *endpoint;
1685
1686 if (IS_ERR_OR_NULL(fwnode))
1687 return -EINVAL;
1688
1689 endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1690 if (endpoint) {
1691 fwnode_handle_put(endpoint);
1692 return 0;
1693 }
1694
1695 return cio2_check_fwnode_graph(fwnode->secondary);
1696 }
1697
1698 /**************** PCI interface ****************/
1699
cio2_pci_probe(struct pci_dev * pci_dev,const struct pci_device_id * id)1700 static int cio2_pci_probe(struct pci_dev *pci_dev,
1701 const struct pci_device_id *id)
1702 {
1703 struct device *dev = &pci_dev->dev;
1704 struct fwnode_handle *fwnode = dev_fwnode(dev);
1705 struct cio2_device *cio2;
1706 int r;
1707
1708 /*
1709	 * On some platforms no connections to sensors are defined in firmware.
1710	 * If the device has no endpoints then we can try to build those as
1711 * software_nodes parsed from SSDB.
1712 */
1713 r = cio2_check_fwnode_graph(fwnode);
1714 if (r) {
1715 if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
1716 dev_err(dev, "fwnode graph has no endpoints connected\n");
1717 return -EINVAL;
1718 }
1719
1720 r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
1721 if (r)
1722 return r;
1723 }
1724
1725 cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
1726 if (!cio2)
1727 return -ENOMEM;
1728 cio2->pci_dev = pci_dev;
1729
1730 r = pcim_enable_device(pci_dev);
1731 if (r) {
1732 dev_err(dev, "failed to enable device (%d)\n", r);
1733 return r;
1734 }
1735
1736 dev_info(dev, "device 0x%x (rev: 0x%x)\n",
1737 pci_dev->device, pci_dev->revision);
1738
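	/* Map the CIO2 register BAR (CIO2_PCI_BAR) */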
1739 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1740 if (r) {
1741 dev_err(dev, "failed to remap I/O memory (%d)\n", r);
1742 return -ENODEV;
1743 }
1744
1745 cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1746
1747 pci_set_drvdata(pci_dev, cio2);
1748
1749 pci_set_master(pci_dev);
1750
1751 r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
1752 if (r) {
1753 dev_err(dev, "failed to set DMA mask (%d)\n", r);
1754 return -ENODEV;
1755 }
1756
1757 r = pci_enable_msi(pci_dev);
1758 if (r) {
1759 dev_err(dev, "failed to enable MSI (%d)\n", r);
1760 return r;
1761 }
1762
1763 r = cio2_fbpt_init_dummy(cio2);
1764 if (r)
1765 return r;
1766
1767 mutex_init(&cio2->lock);
1768
1769 cio2->media_dev.dev = dev;
1770 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1771 sizeof(cio2->media_dev.model));
1772 cio2->media_dev.hw_revision = 0;
1773
1774 media_device_init(&cio2->media_dev);
1775 r = media_device_register(&cio2->media_dev);
1776 if (r < 0)
1777 goto fail_mutex_destroy;
1778
1779 cio2->v4l2_dev.mdev = &cio2->media_dev;
1780 r = v4l2_device_register(dev, &cio2->v4l2_dev);
1781 if (r) {
1782 dev_err(dev, "failed to register V4L2 device (%d)\n", r);
1783 goto fail_media_device_unregister;
1784 }
1785
1786 r = cio2_queues_init(cio2);
1787 if (r)
1788 goto fail_v4l2_device_unregister;
1789
1790 v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
1791
1792	/* Register notifier for the subdevices we care about */
1793 r = cio2_parse_firmware(cio2);
1794 if (r)
1795 goto fail_clean_notifier;
1796
1797 r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
1798 CIO2_NAME, cio2);
1799 if (r) {
1800 dev_err(dev, "failed to request IRQ (%d)\n", r);
1801 goto fail_clean_notifier;
1802 }
1803
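	/* Drop the runtime PM usage count and allow the device to runtime-suspend when idle */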
1804 pm_runtime_put_noidle(dev);
1805 pm_runtime_allow(dev);
1806
1807 return 0;
1808
1809 fail_clean_notifier:
1810 v4l2_async_nf_unregister(&cio2->notifier);
1811 v4l2_async_nf_cleanup(&cio2->notifier);
1812 cio2_queues_exit(cio2);
1813 fail_v4l2_device_unregister:
1814 v4l2_device_unregister(&cio2->v4l2_dev);
1815 fail_media_device_unregister:
1816 media_device_unregister(&cio2->media_dev);
1817 media_device_cleanup(&cio2->media_dev);
1818 fail_mutex_destroy:
1819 mutex_destroy(&cio2->lock);
1820 cio2_fbpt_exit_dummy(cio2);
1821
1822 return r;
1823 }
1824
1825 static void cio2_pci_remove(struct pci_dev *pci_dev)
1826 {
1827 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1828
1829 media_device_unregister(&cio2->media_dev);
1830 v4l2_async_nf_unregister(&cio2->notifier);
1831 v4l2_async_nf_cleanup(&cio2->notifier);
1832 cio2_queues_exit(cio2);
1833 cio2_fbpt_exit_dummy(cio2);
1834 v4l2_device_unregister(&cio2->v4l2_dev);
1835 media_device_cleanup(&cio2->media_dev);
1836 mutex_destroy(&cio2->lock);
1837
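	/* Undo the runtime PM setup done in probe: forbid runtime suspend and restore the usage count */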
1838 pm_runtime_forbid(&pci_dev->dev);
1839 pm_runtime_get_noresume(&pci_dev->dev);
1840 }
1841
1842 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1843 {
1844 struct pci_dev *pci_dev = to_pci_dev(dev);
1845 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1846 void __iomem *const base = cio2->base;
1847 u16 pm;
1848
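	/* Ask the hardware to enter the D0i3 low-power state */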
1849 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1850 dev_dbg(dev, "cio2 runtime suspend.\n");
1851
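	/* Clear the power state field in PMCSR and program D3 */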
1852 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1853 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1854 pm |= CIO2_PMCSR_D3;
1855 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1856
1857 return 0;
1858 }
1859
1860 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1861 {
1862 struct pci_dev *pci_dev = to_pci_dev(dev);
1863 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1864 void __iomem *const base = cio2->base;
1865 u16 pm;
1866
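	/* Request the hardware to exit D0i3 (resume request) */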
1867 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1868 dev_dbg(dev, "cio2 runtime resume.\n");
1869
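	/* Clear the power state field in PMCSR, returning the device to D0 */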
1870 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1871 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1872 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1873
1874 return 0;
1875 }
1876
1877 /*
1878 * Helper function to advance all the elements of a circular buffer by "start"
1879 * positions
1880 */
1881 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1882 {
1883 struct {
1884 size_t begin, end;
1885 } arr[2] = {
1886 { 0, start - 1 },
1887 { start, elems - 1 },
1888 };
1889
1890 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1891
1892 /* Loop as long as we have out-of-place entries */
1893 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1894 size_t size0, i;
1895
1896 /*
1897 * Find the number of entries that can be arranged on this
1898 * iteration.
1899 */
1900 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1901
1902 /* Swap the entries in two parts of the array. */
1903 for (i = 0; i < size0; i++) {
1904 u8 *d = ptr + elem_size * (arr[1].begin + i);
1905 u8 *s = ptr + elem_size * (arr[0].begin + i);
1906 size_t j;
1907
1908 for (j = 0; j < elem_size; j++)
1909 swap(d[j], s[j]);
1910 }
1911
1912 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1913 /* The end of the first array remains unarranged. */
1914 arr[0].begin += size0;
1915 } else {
1916 /*
1917 * The first array is fully arranged so we proceed
1918 * handling the next one.
1919 */
1920 arr[0].begin = arr[1].begin;
1921 arr[0].end = arr[1].begin + size0 - 1;
1922 arr[1].begin += size0;
1923 }
1924 }
1925 }
1926
1927 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1928 {
1929 unsigned int i, j;
1930
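	/* Find the first FBPT entry, starting from bufs_first, that still has a buffer queued */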
1931 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1932 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1933 if (q->bufs[j])
1934 break;
1935
1936 if (i == CIO2_MAX_BUFFERS)
1937 return;
1938
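	/* Rotate the FBPT and the bufs[] array so that entry j becomes entry 0 */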
1939 if (j) {
1940 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1941 CIO2_MAX_BUFFERS, j);
1942 arrange(q->bufs, sizeof(struct cio2_buffer *),
1943 CIO2_MAX_BUFFERS, j);
1944 }
1945
1946 /*
1947 * DMA clears the valid bit when accessing the buffer.
1948	 * When stopping the stream in the suspend callback, some of the buffers
1949	 * may be left in an invalid state. After resume, when DMA meets an invalid
1950	 * buffer, it will halt and stop receiving new data.
1951 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1952 */
1953 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1954 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1955 }
1956
1957 static int __maybe_unused cio2_suspend(struct device *dev)
1958 {
1959 struct pci_dev *pci_dev = to_pci_dev(dev);
1960 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1961 struct cio2_queue *q = cio2->cur_queue;
1962 int r;
1963
1964 dev_dbg(dev, "cio2 suspend\n");
1965 if (!cio2->streaming)
1966 return 0;
1967
1968 /* Stop stream */
1969 r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
1970 if (r) {
1971 dev_err(dev, "failed to stop sensor streaming\n");
1972 return r;
1973 }
1974
1975 cio2_hw_exit(cio2, q);
1976 synchronize_irq(pci_dev->irq);
1977
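	/* Power the device down via the runtime suspend callback */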
1978 pm_runtime_force_suspend(dev);
1979
1980 /*
1981	 * Upon resume, the hardware starts to process the FBPT entries from the
1982	 * beginning, so relocate the queued buffers to the FBPT head before suspending.
1983 */
1984 cio2_fbpt_rearrange(cio2, q);
1985 q->bufs_first = 0;
1986 q->bufs_next = 0;
1987
1988 return 0;
1989 }
1990
1991 static int __maybe_unused cio2_resume(struct device *dev)
1992 {
1993 struct cio2_device *cio2 = dev_get_drvdata(dev);
1994 struct cio2_queue *q = cio2->cur_queue;
1995 int r;
1996
1997 dev_dbg(dev, "cio2 resume\n");
1998 if (!cio2->streaming)
1999 return 0;
2000 /* Start stream */
2001 r = pm_runtime_force_resume(dev);
2002 if (r < 0) {
2003		dev_err(dev, "failed to set power state (%d)\n", r);
2004 return r;
2005 }
2006
2007 r = cio2_hw_init(cio2, q);
2008 if (r) {
2009		dev_err(dev, "failed to init cio2 hw\n");
2010 return r;
2011 }
2012
2013 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
2014 if (r) {
2015		dev_err(dev, "failed to start sensor streaming\n");
2016 cio2_hw_exit(cio2, q);
2017 }
2018
2019 return r;
2020 }
2021
2022 static const struct dev_pm_ops cio2_pm_ops = {
2023 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2024 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2025 };
2026
2027 static const struct pci_device_id cio2_pci_id_table[] = {
2028 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2029 { }
2030 };
2031
2032 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2033
2034 static struct pci_driver cio2_pci_driver = {
2035 .name = CIO2_NAME,
2036 .id_table = cio2_pci_id_table,
2037 .probe = cio2_pci_probe,
2038 .remove = cio2_pci_remove,
2039 .driver = {
2040 .pm = &cio2_pm_ops,
2041 },
2042 };
2043
2044 module_pci_driver(cio2_pci_driver);
2045
2046 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2047 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2048 MODULE_AUTHOR("Jian Xu Zheng");
2049 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2050 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2051 MODULE_LICENSE("GPL v2");
2052 MODULE_DESCRIPTION("IPU3 CIO2 driver");
2053 MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
2054