1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017,2020 Intel Corporation
4  *
5  * Based partially on Intel IPU4 driver written by
6  *  Sakari Ailus <sakari.ailus@linux.intel.com>
7  *  Samu Onkalo
8  *  Jouni Högander <jouni.hogander@intel.com>
9  *  Jouni Ukkonen
10  *  Antti Laakso <antti.laakso@intel.com>
11  * et al.
12  */
13 
14 #include <linux/bitops.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/mm.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/pfn.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/vmalloc.h>
25 
26 #include <media/ipu-bridge.h>
27 #include <media/v4l2-ctrls.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-fwnode.h>
31 #include <media/v4l2-mc.h>
32 #include <media/v4l2-ioctl.h>
33 #include <media/videobuf2-dma-sg.h>
34 
35 #include "ipu3-cio2.h"
36 
37 struct ipu3_cio2_fmt {
38 	u32 mbus_code;
39 	u32 fourcc;
40 	u8 mipicode;
41 	u8 bpp;
42 };
43 
44 /*
45  * These are the raw formats used in Intel's third-generation
46  * Image Processing Unit, known as IPU3.
47  * 10-bit raw Bayer data is packed as 32 bytes for every 25 pixels,
48  * with the last 6 bits of each 32-byte block unused.
49  */
50 static const struct ipu3_cio2_fmt formats[] = {
51 	{	/* put default entry at beginning */
52 		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
53 		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
54 		.mipicode	= 0x2b,
55 		.bpp		= 10,
56 	}, {
57 		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
58 		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
59 		.mipicode	= 0x2b,
60 		.bpp		= 10,
61 	}, {
62 		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
63 		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
64 		.mipicode	= 0x2b,
65 		.bpp		= 10,
66 	}, {
67 		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
68 		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
69 		.mipicode	= 0x2b,
70 		.bpp		= 10,
71 	}, {
72 		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
73 		.fourcc		= V4L2_PIX_FMT_IPU3_Y10,
74 		.mipicode	= 0x2b,
75 		.bpp		= 10,
76 	},
77 };
78 
79 /*
80  * cio2_find_format - look up a color format by fourcc and/or media bus code
81  * @pixelformat: fourcc to match, ignored if null
82  * @mbus_code: media bus code to match, ignored if null
83  */
84 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
85 						    const u32 *mbus_code)
86 {
87 	unsigned int i;
88 
89 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
90 		if (pixelformat && *pixelformat != formats[i].fourcc)
91 			continue;
92 		if (mbus_code && *mbus_code != formats[i].mbus_code)
93 			continue;
94 
95 		return &formats[i];
96 	}
97 
98 	return NULL;
99 }
100 
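/*
 * For illustration, using only values found in this file: with the packing
 * described above (64 bytes per 50 pixels, equivalent to 32 bytes per 25
 * pixels), the default 1936-pixel-wide line takes
 * DIV_ROUND_UP(1936, 50) * 64 = 39 * 64 = 2496 bytes.
 */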
101 static inline u32 cio2_bytesperline(const unsigned int width)
102 {
103 	/*
104 	 * 64 bytes for every 50 pixels, the line length
105 	 * in bytes is a multiple of 64 (line end alignment).
106 	 */
107 	return DIV_ROUND_UP(width, 50) * 64;
108 }
109 
110 /**************** FBPT operations ****************/
111 
112 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
113 {
114 	struct device *dev = &cio2->pci_dev->dev;
115 
116 	if (cio2->dummy_lop) {
117 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
118 				  cio2->dummy_lop_bus_addr);
119 		cio2->dummy_lop = NULL;
120 	}
121 	if (cio2->dummy_page) {
122 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
123 				  cio2->dummy_page_bus_addr);
124 		cio2->dummy_page = NULL;
125 	}
126 }
127 
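/*
 * The hardware walks FBPT entries to LOPs to pages, so every entry it may
 * visit must reference DMA-mapped memory even when no real buffer is queued.
 * A single dummy 4 KiB page is therefore allocated, plus a dummy LOP whose
 * entries all point at that page; unused FBPT entries are directed at the
 * dummy LOP (see cio2_fbpt_entry_init_dummy() and cio2_fbpt_entry_init_buf()).
 */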
128 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
129 {
130 	struct device *dev = &cio2->pci_dev->dev;
131 	unsigned int i;
132 
133 	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
134 					      &cio2->dummy_page_bus_addr,
135 					      GFP_KERNEL);
136 	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
137 					     &cio2->dummy_lop_bus_addr,
138 					     GFP_KERNEL);
139 	if (!cio2->dummy_page || !cio2->dummy_lop) {
140 		cio2_fbpt_exit_dummy(cio2);
141 		return -ENOMEM;
142 	}
143 	/*
144 	 * A List of Pointers (LOP) holds 1024 32-bit entries, each pointing to a 4 KiB page.
145 	 * Initialize every entry to the bus address of the dummy page.
146 	 */
147 	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
148 		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
149 
150 	return 0;
151 }
152 
153 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
154 				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
155 {
156 	/*
157 	 * The CPU first initializes some fields in the FBPT, then sets
158 	 * the VALID bit. This barrier ensures that the DMA (device)
159 	 * does not see the VALID bit set before the other fields are
160 	 * initialized; otherwise it could lead to havoc.
161 	 */
162 	dma_wmb();
163 
164 	/*
165 	 * Request interrupts for start and completion
166 	 * Valid bit is applicable only to 1st entry
167 	 */
168 	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
169 		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
170 }
171 
172 /* Initialize FBPT entries to point to the dummy frame */
173 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
174 				       struct cio2_fbpt_entry
175 				       entry[CIO2_MAX_LOPS])
176 {
177 	unsigned int i;
178 
179 	entry[0].first_entry.first_page_offset = 0;
180 	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
181 	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
182 
183 	for (i = 0; i < CIO2_MAX_LOPS; i++)
184 		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
185 
186 	cio2_fbpt_entry_enable(cio2, entry);
187 }
188 
189 /* Initialize FBPT entries to point to a given buffer */
190 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
191 				     struct cio2_buffer *b,
192 				     struct cio2_fbpt_entry
193 				     entry[CIO2_MAX_LOPS])
194 {
195 	struct vb2_buffer *vb = &b->vbb.vb2_buf;
196 	unsigned int length = vb->planes[0].length;
197 	int remaining, i;
198 
199 	entry[0].first_entry.first_page_offset = b->offset;
200 	remaining = length + entry[0].first_entry.first_page_offset;
201 	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
202 	/*
203 	 * last_page_available_bytes has the offset of the last byte in the
204 	 * last page which is still accessible by DMA. DMA cannot access
205 	 * beyond this point. Valid range for this is from 0 to 4095.
206 	 * 0 indicates 1st byte in the page is DMA accessible.
207 	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
208 	 * is available for DMA transfer.
209 	 */
210 	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
211 	entry[1].second_entry.last_page_available_bytes = remaining - 1;
212 	/* Fill the FBPT; each LOP referenced below covers CIO2_LOP_ENTRIES pages of the buffer */
213 	remaining = length;
214 	i = 0;
215 	while (remaining > 0) {
216 		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
217 		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
218 		entry++;
219 		i++;
220 	}
221 
222 	/*
223 	 * The first unused FBPT entry should still point to a valid LOP
224 	 */
225 	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
226 
227 	cio2_fbpt_entry_enable(cio2, entry);
228 }
229 
230 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
231 {
232 	struct device *dev = &cio2->pci_dev->dev;
233 
234 	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
235 				     GFP_KERNEL);
236 	if (!q->fbpt)
237 		return -ENOMEM;
238 
239 	return 0;
240 }
241 
242 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
243 {
244 	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
245 }
246 
247 /**************** CSI2 hardware setup ****************/
248 
249 /*
250  * The CSI2 receiver has several parameters affecting
251  * the receiver timings. These depend on the MIPI bus frequency
252  * F in Hz (sensor transmitter rate) as follows:
253  *     register value = (A/1e9 + B * UI) / COUNT_ACC
254  * where
255  *      UI = 1 / (2 * F) in seconds
256  *      COUNT_ACC = counter accuracy in seconds
257  *      For IPU3, COUNT_ACC = 0.0625 ns
258  *
259  * A and B are coefficients from the table below,
260  * depending whether the register minimum or maximum value is
261  * calculated.
262  *                                     Minimum     Maximum
263  * Clock lane                          A     B     A     B
264  * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
265  * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
266  * Data lanes
267  * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
268  * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
269  * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
270  * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
271  * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
272  * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
273  * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
274  * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
275  *
276  * We use the minimum values of both A and B.
277  */
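/*
 * As an illustration only (400 MHz is an assumed sensor link frequency, not a
 * value taken from this driver): UI = 1 / (2 * 400 MHz) = 1.25 ns, and with
 * COUNT_ACC = 0.0625 ns the minimum clock-lane settle value becomes
 *     (95 ns - 8 * 1.25 ns) / 0.0625 ns = 1360,
 * which matches what cio2_rx_timing(95, -8, 400000000, ...) below computes.
 */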
278 
279 /*
280  * shift for keeping value range suitable for 32-bit integer arithmetic
281  */
282 #define LIMIT_SHIFT	8
283 
284 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
285 {
286 	const u32 accinv = 16; /* inverse of counter resolution */
287 	const u32 uiinv = 500000000; /* 1e9 / 2 */
288 	s32 r;
289 
290 	freq >>= LIMIT_SHIFT;
291 
292 	if (WARN_ON(freq <= 0 || freq > S32_MAX))
293 		return def;
294 	/*
295 	 * b could be 0, -2 or -8, so |accinv * b| is always
296 	 * less than (1 << LIMIT_SHIFT) and thus |r| < 500000000.
297 	 */
298 	r = accinv * b * (uiinv >> LIMIT_SHIFT);
299 	r = r / (s32)freq;
300 	/* max value of a is 95 */
301 	r += accinv * a;
302 
303 	return r;
304 };
305 
306 /* Calculate the delay value for termination enable of clock lane HS Rx */
307 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
308 				 struct cio2_csi2_timing *timing,
309 				 unsigned int bpp, unsigned int lanes)
310 {
311 	struct device *dev = &cio2->pci_dev->dev;
312 	struct media_pad *src_pad;
313 	s64 freq;
314 
315 	src_pad = media_entity_remote_source_pad_unique(&q->subdev.entity);
316 	if (IS_ERR(src_pad)) {
317 		dev_err(dev, "can't get source pad of %s (%ld)\n",
318 			q->subdev.name, PTR_ERR(src_pad));
319 		return PTR_ERR(src_pad);
320 	}
321 
322 	freq = v4l2_get_link_freq(src_pad, bpp, lanes * 2);
323 	if (freq < 0) {
324 		dev_err(dev, "error %lld, invalid link_freq\n", freq);
325 		return freq;
326 	}
327 
328 	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
329 					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
330 					    freq,
331 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
332 	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
333 					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
334 					    freq,
335 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
336 	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
337 					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
338 					    freq,
339 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
340 	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
341 					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
342 					    freq,
343 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
344 
345 	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
346 	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
347 	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
348 	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
349 
350 	return 0;
351 };
352 
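/*
 * Program the CSI-2 receiver and DMA for one queue: lane timings first, then
 * PBM watermarks and arbitration, MIPI backend LUTs, interrupt enables, the
 * pixel formatter, clock gating and LTR values, and finally the FBPT-based
 * DMA channel, before enabling the backend and receiver (last device first).
 */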
353 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
354 {
355 	static const int NUM_VCS = 4;
356 	static const int SID;	/* Stream id */
357 	static const int ENTRY;
358 	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
359 					CIO2_FBPT_SUBENTRY_UNIT);
360 	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
361 	const struct ipu3_cio2_fmt *fmt;
362 	void __iomem *const base = cio2->base;
363 	u8 lanes, csi2bus = q->csi2.port;
364 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
365 	struct cio2_csi2_timing timing = { 0 };
366 	int i, r;
367 
368 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
369 	if (!fmt)
370 		return -EINVAL;
371 
372 	lanes = q->csi2.lanes;
373 
374 	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
375 	if (r)
376 		return r;
377 
378 	writel(timing.clk_termen, q->csi_rx_base +
379 		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
380 	writel(timing.clk_settle, q->csi_rx_base +
381 		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
382 
383 	for (i = 0; i < lanes; i++) {
384 		writel(timing.dat_termen, q->csi_rx_base +
385 			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
386 		writel(timing.dat_settle, q->csi_rx_base +
387 			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
388 	}
389 
390 	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
391 	       CIO2_PBM_WMCTRL1_MID1_2CK |
392 	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
393 	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
394 	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
395 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
396 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
397 	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
398 	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
399 	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
400 	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
401 	       CIO2_PBM_ARB_CTRL_LE_EN |
402 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
403 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
404 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
405 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
406 	       base + CIO2_REG_PBM_ARB_CTRL);
407 	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
408 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
409 	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
410 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
411 
412 	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
413 	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
414 
415 	/* Configure MIPI backend */
416 	for (i = 0; i < NUM_VCS; i++)
417 		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
418 
419 	/* There are 16 short packet LUT entries */
420 	for (i = 0; i < 16; i++)
421 		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
422 		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
423 	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
424 	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
425 
426 	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
427 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
428 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
429 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
430 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
431 	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
432 
433 	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
434 	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
435 	       base + CIO2_REG_INT_EN);
436 
437 	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
438 	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
439 	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
440 	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
441 	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
442 	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
443 	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
444 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
445 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
446 	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
447 
448 	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
449 	writel(CIO2_CGC_PRIM_TGE |
450 	       CIO2_CGC_SIDE_TGE |
451 	       CIO2_CGC_XOSC_TGE |
452 	       CIO2_CGC_D3I3_TGE |
453 	       CIO2_CGC_CSI2_INTERFRAME_TGE |
454 	       CIO2_CGC_CSI2_PORT_DCGE |
455 	       CIO2_CGC_SIDE_DCGE |
456 	       CIO2_CGC_PRIM_DCGE |
457 	       CIO2_CGC_ROSC_DCGE |
458 	       CIO2_CGC_XOSC_DCGE |
459 	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
460 	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
461 	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
462 	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
463 	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
464 	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
465 	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
466 	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
467 	       base + CIO2_REG_LTRVAL01);
468 	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
469 	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
470 	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
471 	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
472 	       base + CIO2_REG_LTRVAL23);
473 
474 	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
475 		writel(0, base + CIO2_REG_CDMABA(i));
476 		writel(0, base + CIO2_REG_CDMAC0(i));
477 		writel(0, base + CIO2_REG_CDMAC1(i));
478 	}
479 
480 	/* Enable DMA */
481 	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
482 
483 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
484 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
485 	       CIO2_CDMAC0_DMA_INTR_ON_FE |
486 	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
487 	       CIO2_CDMAC0_DMA_EN |
488 	       CIO2_CDMAC0_DMA_INTR_ON_FS |
489 	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
490 
491 	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
492 	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
493 
494 	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
495 
496 	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
497 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
498 	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
499 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
500 	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
501 
502 	/* Clear interrupts */
503 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
504 	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
505 	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
506 	writel(~0, base + CIO2_REG_INT_STS);
507 
508 	/* Enable devices, starting from the last device in the pipe */
509 	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
510 	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
511 
512 	return 0;
513 }
514 
515 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
516 {
517 	struct device *dev = &cio2->pci_dev->dev;
518 	void __iomem *const base = cio2->base;
519 	unsigned int i;
520 	u32 value;
521 	int ret;
522 
523 	/* Disable CSI receiver and MIPI backend devices */
524 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
525 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
526 	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
527 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
528 
529 	/* Halt DMA */
530 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
531 	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
532 				 value, value & CIO2_CDMAC0_DMA_HALTED,
533 				 4000, 2000000);
534 	if (ret)
535 		dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
536 
537 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
538 		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
539 		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
540 		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
541 		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
542 	}
543 }
544 
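/*
 * Called from the interrupt path when DMA channel @dma_chan reports frames
 * ready. Starting at bufs_first, walk the FBPT ring and complete every buffer
 * whose entry the hardware has consumed (VALID cleared), re-initializing each
 * slot to the dummy frame, until an entry with VALID still set is reached.
 */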
545 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
546 {
547 	struct device *dev = &cio2->pci_dev->dev;
548 	struct cio2_queue *q = cio2->cur_queue;
549 	struct cio2_fbpt_entry *entry;
550 	u64 ns = ktime_get_ns();
551 
552 	if (dma_chan >= CIO2_QUEUES) {
553 		dev_err(dev, "bad DMA channel %i\n", dma_chan);
554 		return;
555 	}
556 
557 	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
558 	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
559 		dev_warn(dev, "no ready buffers found on DMA channel %u\n",
560 			 dma_chan);
561 		return;
562 	}
563 
564 	/* Find out which buffer(s) are ready */
565 	do {
566 		struct cio2_buffer *b;
567 
568 		b = q->bufs[q->bufs_first];
569 		if (b) {
570 			unsigned int received = entry[1].second_entry.num_of_bytes;
571 			unsigned long payload =
572 				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
573 
574 			q->bufs[q->bufs_first] = NULL;
575 			atomic_dec(&q->bufs_queued);
576 			dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
577 
578 			b->vbb.vb2_buf.timestamp = ns;
579 			b->vbb.field = V4L2_FIELD_NONE;
580 			b->vbb.sequence = atomic_read(&q->frame_sequence);
581 			if (payload != received)
582 				dev_warn(dev,
583 					 "payload length is %lu, received %u\n",
584 					 payload, received);
585 			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
586 		}
587 		atomic_inc(&q->frame_sequence);
588 		cio2_fbpt_entry_init_dummy(cio2, entry);
589 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
590 		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
591 	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
592 }
593 
594 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
595 {
596 	/*
597 	 * For the user space camera control algorithms it is essential
598 	 * to know when the reception of a frame has begun. That's often
599 	 * the best timing information to get from the hardware.
600 	 */
601 	struct v4l2_event event = {
602 		.type = V4L2_EVENT_FRAME_SYNC,
603 		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
604 	};
605 
606 	v4l2_event_queue(q->subdev.devnode, &event);
607 }
608 
609 static const char *const cio2_irq_errs[] = {
610 	"single packet header error corrected",
611 	"multiple packet header errors detected",
612 	"payload checksum (CRC) error",
613 	"fifo overflow",
614 	"reserved short packet data type detected",
615 	"reserved long packet data type detected",
616 	"incomplete long packet detected",
617 	"frame sync error",
618 	"line sync error",
619 	"DPHY start of transmission error",
620 	"DPHY synchronization error",
621 	"escape mode error",
622 	"escape mode trigger event",
623 	"escape mode ultra-low power state for data lane(s)",
624 	"escape mode ultra-low power state exit for clock lane",
625 	"inter-frame short packet discarded",
626 	"inter-frame long packet discarded",
627 	"non-matching Long Packet stalled",
628 };
629 
630 static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
631 {
632 	unsigned long csi2_status = status;
633 	unsigned int i;
634 
635 	for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
636 		dev_err(dev, "CSI-2 receiver port %i: %s\n",
637 			port, cio2_irq_errs[i]);
638 
639 	if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
640 		dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
641 			 csi2_status, port);
642 }
643 
644 static const char *const cio2_port_errs[] = {
645 	"ECC recoverable",
646 	"DPHY not recoverable",
647 	"ECC not recoverable",
648 	"CRC error",
649 	"INTERFRAMEDATA",
650 	"PKT2SHORT",
651 	"PKT2LONG",
652 };
653 
654 static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
655 {
656 	unsigned long port_status = status;
657 	unsigned int i;
658 
659 	for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
660 		dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
661 }
662 
663 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
664 {
665 	struct device *dev = &cio2->pci_dev->dev;
666 	void __iomem *const base = cio2->base;
667 
668 	if (int_status & CIO2_INT_IOOE) {
669 		/*
670 		 * Interrupt on Output Error:
671 		 * 1) SRAM is full and FS received, or
672 		 * 2) An invalid bit detected by DMA.
673 		 */
674 		u32 oe_status, oe_clear;
675 
676 		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
677 		oe_status = oe_clear;
678 
679 		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
680 			dev_err(dev, "DMA output error: 0x%x\n",
681 				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
682 				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
683 			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
684 		}
685 		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
686 			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
687 				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
688 				>> CIO2_INT_EXT_OE_OES_SHIFT);
689 			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
690 		}
691 		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
692 		if (oe_status)
693 			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
694 				 oe_status);
695 		int_status &= ~CIO2_INT_IOOE;
696 	}
697 
698 	if (int_status & CIO2_INT_IOC_MASK) {
699 		/* DMA IO done -- frame ready */
700 		u32 clr = 0;
701 		unsigned int d;
702 
703 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
704 			if (int_status & CIO2_INT_IOC(d)) {
705 				clr |= CIO2_INT_IOC(d);
706 				cio2_buffer_done(cio2, d);
707 			}
708 		int_status &= ~clr;
709 	}
710 
711 	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
712 		/* DMA IO starts or reached specified line */
713 		u32 clr = 0;
714 		unsigned int d;
715 
716 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
717 			if (int_status & CIO2_INT_IOS_IOLN(d)) {
718 				clr |= CIO2_INT_IOS_IOLN(d);
719 				if (d == CIO2_DMA_CHAN)
720 					cio2_queue_event_sof(cio2,
721 							     cio2->cur_queue);
722 			}
723 		int_status &= ~clr;
724 	}
725 
726 	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
727 		/* CSI2 receiver (error) interrupt */
728 		unsigned int port;
729 		u32 ie_status;
730 
731 		ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
732 
733 		for (port = 0; port < CIO2_NUM_PORTS; port++) {
734 			u32 port_status = (ie_status >> (port * 8)) & 0xff;
735 
736 			cio2_irq_log_port_errs(dev, port, port_status);
737 
738 			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
739 				void __iomem *csi_rx_base =
740 						base + CIO2_REG_PIPE_BASE(port);
741 				u32 csi2_status;
742 
743 				csi2_status = readl(csi_rx_base +
744 						CIO2_REG_IRQCTRL_STATUS);
745 
746 				cio2_irq_log_irq_errs(dev, port, csi2_status);
747 
748 				writel(csi2_status,
749 				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
750 			}
751 		}
752 
753 		writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
754 
755 		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
756 	}
757 
758 	if (int_status)
759 		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
760 }
761 
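/*
 * Top-level interrupt handler. The status register is acknowledged by writing
 * the read value back and then handled; the read/ack/handle cycle repeats
 * until no status bits remain, so events raised while handling are not lost.
 */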
762 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
763 {
764 	struct cio2_device *cio2 = cio2_ptr;
765 	void __iomem *const base = cio2->base;
766 	struct device *dev = &cio2->pci_dev->dev;
767 	u32 int_status;
768 
769 	int_status = readl(base + CIO2_REG_INT_STS);
770 	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
771 	if (!int_status)
772 		return IRQ_NONE;
773 
774 	do {
775 		writel(int_status, base + CIO2_REG_INT_STS);
776 		cio2_irq_handle_once(cio2, int_status);
777 		int_status = readl(base + CIO2_REG_INT_STS);
778 		if (int_status)
779 			dev_dbg(dev, "pending status 0x%x\n", int_status);
780 	} while (int_status);
781 
782 	return IRQ_HANDLED;
783 }
784 
785 /**************** Videobuf2 interface ****************/
786 
787 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
788 					enum vb2_buffer_state state)
789 {
790 	unsigned int i;
791 
792 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
793 		if (q->bufs[i]) {
794 			atomic_dec(&q->bufs_queued);
795 			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
796 					state);
797 			q->bufs[i] = NULL;
798 		}
799 	}
800 }
801 
802 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
803 				unsigned int *num_buffers,
804 				unsigned int *num_planes,
805 				unsigned int sizes[],
806 				struct device *alloc_devs[])
807 {
808 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
809 	struct device *dev = &cio2->pci_dev->dev;
810 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
811 	unsigned int i;
812 
813 	if (*num_planes && *num_planes < q->format.num_planes)
814 		return -EINVAL;
815 
816 	for (i = 0; i < q->format.num_planes; ++i) {
817 		if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
818 			return -EINVAL;
819 		sizes[i] = q->format.plane_fmt[i].sizeimage;
820 		alloc_devs[i] = dev;
821 	}
822 
823 	*num_planes = q->format.num_planes;
824 	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
825 
826 	/* Initialize buffer queue */
827 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
828 		q->bufs[i] = NULL;
829 		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
830 	}
831 	atomic_set(&q->bufs_queued, 0);
832 	q->bufs_first = 0;
833 	q->bufs_next = 0;
834 
835 	return 0;
836 }
837 
838 /* Called after each buffer is allocated */
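/*
 * Build the buffer's LOP tables: the plane needs PFN_UP(length) page entries
 * plus one trailing entry pointing at the dummy page, hence the
 * DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES) LOPs allocated below.
 */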
839 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
840 {
841 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
842 	struct device *dev = &cio2->pci_dev->dev;
843 	struct cio2_buffer *b = to_cio2_buffer(vb);
844 	unsigned int pages = PFN_UP(vb->planes[0].length);
845 	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
846 	struct sg_table *sg;
847 	struct sg_dma_page_iter sg_iter;
848 	unsigned int i, j;
849 
850 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
851 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
852 			vb->planes[0].length);
853 		return -ENOSPC;		/* Should never happen */
854 	}
855 
856 	memset(b->lop, 0, sizeof(b->lop));
857 	/* Allocate LOP table */
858 	for (i = 0; i < lops; i++) {
859 		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
860 					       &b->lop_bus_addr[i], GFP_KERNEL);
861 		if (!b->lop[i])
862 			goto fail;
863 	}
864 
865 	/* Fill LOP */
866 	sg = vb2_dma_sg_plane_desc(vb, 0);
867 	if (!sg)
868 		return -ENOMEM;
869 
870 	if (sg->nents && sg->sgl)
871 		b->offset = sg->sgl->offset;
872 
873 	i = j = 0;
874 	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
875 		if (!pages--)
876 			break;
877 		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
878 		j++;
879 		if (j == CIO2_LOP_ENTRIES) {
880 			i++;
881 			j = 0;
882 		}
883 	}
884 
885 	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
886 	return 0;
887 fail:
888 	while (i--)
889 		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
890 	return -ENOMEM;
891 }
892 
893 /* Transfer buffer ownership to cio2 */
894 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
895 {
896 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
897 	struct device *dev = &cio2->pci_dev->dev;
898 	struct cio2_queue *q =
899 		container_of(vb->vb2_queue, struct cio2_queue, vbq);
900 	struct cio2_buffer *b = to_cio2_buffer(vb);
901 	struct cio2_fbpt_entry *entry;
902 	unsigned long flags;
903 	unsigned int i, j, next = q->bufs_next;
904 	int bufs_queued = atomic_inc_return(&q->bufs_queued);
905 	u32 fbpt_rp;
906 
907 	dev_dbg(dev, "queue buffer %d\n", vb->index);
908 
909 	/*
910 	 * This code queues the buffer to the CIO2 DMA engine, which starts
911 	 * running once streaming has started. It is possible that this code
912 	 * gets pre-empted due to increased CPU load. If that happens, the driver
913 	 * does not get an opportunity to queue new buffers to the CIO2 DMA
914 	 * engine. When the DMA engine encounters an FBPT entry without the
915 	 * VALID bit set, the DMA engine halts, which requires a restart of
916 	 * the DMA engine and sensor to continue streaming.
917 	 * This is not desired and is highly unlikely given that there are
918 	 * 32 FBPT entries that the DMA engine needs to process before it runs
919 	 * into an FBPT entry without the VALID bit set. We try to mitigate this
920 	 * by disabling interrupts for the duration of this queueing.
921 	 */
922 	local_irq_save(flags);
923 
924 	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
925 		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
926 		   & CIO2_CDMARI_FBPT_RP_MASK;
927 
928 	/*
929 	 * fbpt_rp is the fbpt entry that the dma is currently working
930 	 * on, but since it could jump to next entry at any time,
931 	 * assume that we might already be there.
932 	 */
933 	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
934 
935 	if (bufs_queued <= 1 || fbpt_rp == next)
936 		/* Buffers were drained */
937 		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
938 
939 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
940 		/*
941 		 * We have allocated CIO2_MAX_BUFFERS circularly for the
942 		 * hw, while the user has requested a queue of N buffers. The
943 		 * driver ensures N <= CIO2_MAX_BUFFERS and guarantees that
944 		 * whenever the user queues a buffer, there necessarily is a free slot.
945 		 */
946 		if (!q->bufs[next]) {
947 			q->bufs[next] = b;
948 			entry = &q->fbpt[next * CIO2_MAX_LOPS];
949 			cio2_fbpt_entry_init_buf(cio2, b, entry);
950 			local_irq_restore(flags);
951 			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
952 			for (j = 0; j < vb->num_planes; j++)
953 				vb2_set_plane_payload(vb, j,
954 					q->format.plane_fmt[j].sizeimage);
955 			return;
956 		}
957 
958 		dev_dbg(dev, "entry %i was full!\n", next);
959 		next = (next + 1) % CIO2_MAX_BUFFERS;
960 	}
961 
962 	local_irq_restore(flags);
963 	dev_err(dev, "error: all cio2 entries were full!\n");
964 	atomic_dec(&q->bufs_queued);
965 	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
966 }
967 
968 /* Called when each buffer is freed */
969 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
970 {
971 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
972 	struct device *dev = &cio2->pci_dev->dev;
973 	struct cio2_buffer *b = to_cio2_buffer(vb);
974 	unsigned int i;
975 
976 	/* Free LOP table */
977 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
978 		if (b->lop[i])
979 			dma_free_coherent(dev, PAGE_SIZE,
980 					  b->lop[i], b->lop_bus_addr[i]);
981 	}
982 }
983 
984 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
985 {
986 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
987 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
988 	struct device *dev = &cio2->pci_dev->dev;
989 	int r;
990 
991 	cio2->cur_queue = q;
992 	atomic_set(&q->frame_sequence, 0);
993 
994 	r = pm_runtime_resume_and_get(dev);
995 	if (r < 0) {
996 		dev_info(dev, "failed to set power %d\n", r);
997 		return r;
998 	}
999 
1000 	r = video_device_pipeline_start(&q->vdev, &q->pipe);
1001 	if (r)
1002 		goto fail_pipeline;
1003 
1004 	r = cio2_hw_init(cio2, q);
1005 	if (r)
1006 		goto fail_hw;
1007 
1008 	/* Start streaming on sensor */
1009 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1010 	if (r)
1011 		goto fail_csi2_subdev;
1012 
1013 	cio2->streaming = true;
1014 
1015 	return 0;
1016 
1017 fail_csi2_subdev:
1018 	cio2_hw_exit(cio2, q);
1019 fail_hw:
1020 	video_device_pipeline_stop(&q->vdev);
1021 fail_pipeline:
1022 	dev_dbg(dev, "failed to start streaming (%d)\n", r);
1023 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1024 	pm_runtime_put(dev);
1025 
1026 	return r;
1027 }
1028 
1029 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1030 {
1031 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1032 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1033 	struct device *dev = &cio2->pci_dev->dev;
1034 
1035 	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1036 		dev_err(dev, "failed to stop sensor streaming\n");
1037 
1038 	cio2_hw_exit(cio2, q);
1039 	synchronize_irq(cio2->pci_dev->irq);
1040 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1041 	video_device_pipeline_stop(&q->vdev);
1042 	pm_runtime_put(dev);
1043 	cio2->streaming = false;
1044 }
1045 
1046 static const struct vb2_ops cio2_vb2_ops = {
1047 	.buf_init = cio2_vb2_buf_init,
1048 	.buf_queue = cio2_vb2_buf_queue,
1049 	.buf_cleanup = cio2_vb2_buf_cleanup,
1050 	.queue_setup = cio2_vb2_queue_setup,
1051 	.start_streaming = cio2_vb2_start_streaming,
1052 	.stop_streaming = cio2_vb2_stop_streaming,
1053 };
1054 
1055 /**************** V4L2 interface ****************/
1056 
1057 static int cio2_v4l2_querycap(struct file *file, void *fh,
1058 			      struct v4l2_capability *cap)
1059 {
1060 	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1061 	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1062 
1063 	return 0;
1064 }
1065 
1066 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1067 			      struct v4l2_fmtdesc *f)
1068 {
1069 	if (f->index >= ARRAY_SIZE(formats))
1070 		return -EINVAL;
1071 
1072 	f->pixelformat = formats[f->index].fourcc;
1073 
1074 	return 0;
1075 }
1076 
1077 /* The format is validated in cio2_video_link_validate() */
1078 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1079 {
1080 	struct cio2_queue *q = file_to_cio2_queue(file);
1081 
1082 	f->fmt.pix_mp = q->format;
1083 
1084 	return 0;
1085 }
1086 
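/*
 * For reference, at the maximum supported 4224x3136 resolution the adjusted
 * format below works out to cio2_bytesperline(4224) = 5440 bytes per line and
 * a sizeimage of 5440 * 3136 = 17059840 bytes.
 */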
1087 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1088 {
1089 	const struct ipu3_cio2_fmt *fmt;
1090 	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1091 
1092 	fmt = cio2_find_format(&mpix->pixelformat, NULL);
1093 	if (!fmt)
1094 		fmt = &formats[0];
1095 
1096 	/* Only supports up to 4224x3136 */
1097 	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1098 		mpix->width = CIO2_IMAGE_MAX_WIDTH;
1099 	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1100 		mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1101 
1102 	mpix->num_planes = 1;
1103 	mpix->pixelformat = fmt->fourcc;
1104 	mpix->colorspace = V4L2_COLORSPACE_RAW;
1105 	mpix->field = V4L2_FIELD_NONE;
1106 	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1107 	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1108 							mpix->height;
1109 
1110 	/* use default */
1111 	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1112 	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1113 	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1114 
1115 	return 0;
1116 }
1117 
1118 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1119 {
1120 	struct cio2_queue *q = file_to_cio2_queue(file);
1121 
1122 	cio2_v4l2_try_fmt(file, fh, f);
1123 	q->format = f->fmt.pix_mp;
1124 
1125 	return 0;
1126 }
1127 
1128 static int
1129 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1130 {
1131 	if (input->index > 0)
1132 		return -EINVAL;
1133 
1134 	strscpy(input->name, "camera", sizeof(input->name));
1135 	input->type = V4L2_INPUT_TYPE_CAMERA;
1136 
1137 	return 0;
1138 }
1139 
1140 static int
1141 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1142 {
1143 	*input = 0;
1144 
1145 	return 0;
1146 }
1147 
1148 static int
1149 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1150 {
1151 	return input == 0 ? 0 : -EINVAL;
1152 }
1153 
1154 static const struct v4l2_file_operations cio2_v4l2_fops = {
1155 	.owner = THIS_MODULE,
1156 	.unlocked_ioctl = video_ioctl2,
1157 	.open = v4l2_fh_open,
1158 	.release = vb2_fop_release,
1159 	.poll = vb2_fop_poll,
1160 	.mmap = vb2_fop_mmap,
1161 };
1162 
1163 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1164 	.vidioc_querycap = cio2_v4l2_querycap,
1165 	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1166 	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1167 	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1168 	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1169 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
1170 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
1171 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1172 	.vidioc_querybuf = vb2_ioctl_querybuf,
1173 	.vidioc_qbuf = vb2_ioctl_qbuf,
1174 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
1175 	.vidioc_streamon = vb2_ioctl_streamon,
1176 	.vidioc_streamoff = vb2_ioctl_streamoff,
1177 	.vidioc_expbuf = vb2_ioctl_expbuf,
1178 	.vidioc_enum_input = cio2_video_enum_input,
1179 	.vidioc_g_input	= cio2_video_g_input,
1180 	.vidioc_s_input	= cio2_video_s_input,
1181 };
1182 
1183 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1184 				       struct v4l2_fh *fh,
1185 				       struct v4l2_event_subscription *sub)
1186 {
1187 	if (sub->type != V4L2_EVENT_FRAME_SYNC)
1188 		return -EINVAL;
1189 
1190 	/* Line number. For now only zero accepted. */
1191 	if (sub->id != 0)
1192 		return -EINVAL;
1193 
1194 	return v4l2_event_subscribe(fh, sub, 0, NULL);
1195 }
1196 
1197 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1198 {
1199 	struct v4l2_mbus_framefmt *format;
1200 	const struct v4l2_mbus_framefmt fmt_default = {
1201 		.width = 1936,
1202 		.height = 1096,
1203 		.code = formats[0].mbus_code,
1204 		.field = V4L2_FIELD_NONE,
1205 		.colorspace = V4L2_COLORSPACE_RAW,
1206 		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1207 		.quantization = V4L2_QUANTIZATION_DEFAULT,
1208 		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
1209 	};
1210 
1211 	/* Initialize try_fmt */
1212 	format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SINK);
1213 	*format = fmt_default;
1214 
1215 	/* same as sink */
1216 	format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SOURCE);
1217 	*format = fmt_default;
1218 
1219 	return 0;
1220 }
1221 
1222 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1223 			       struct v4l2_subdev_state *sd_state,
1224 			       struct v4l2_subdev_format *fmt)
1225 {
1226 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1227 
1228 	mutex_lock(&q->subdev_lock);
1229 
1230 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1231 		fmt->format = *v4l2_subdev_state_get_format(sd_state,
1232 							    fmt->pad);
1233 	else
1234 		fmt->format = q->subdev_fmt;
1235 
1236 	mutex_unlock(&q->subdev_lock);
1237 
1238 	return 0;
1239 }
1240 
1241 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1242 			       struct v4l2_subdev_state *sd_state,
1243 			       struct v4l2_subdev_format *fmt)
1244 {
1245 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1246 	struct v4l2_mbus_framefmt *mbus;
1247 	u32 mbus_code = fmt->format.code;
1248 	unsigned int i;
1249 
1250 	/*
1251 	 * Only allow setting sink pad format;
1252 	 * source always propagates from sink
1253 	 */
1254 	if (fmt->pad == CIO2_PAD_SOURCE)
1255 		return cio2_subdev_get_fmt(sd, sd_state, fmt);
1256 
1257 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1258 		mbus = v4l2_subdev_state_get_format(sd_state, fmt->pad);
1259 	else
1260 		mbus = &q->subdev_fmt;
1261 
1262 	fmt->format.code = formats[0].mbus_code;
1263 
1264 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
1265 		if (formats[i].mbus_code == mbus_code) {
1266 			fmt->format.code = mbus_code;
1267 			break;
1268 		}
1269 	}
1270 
1271 	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1272 	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1273 	fmt->format.field = V4L2_FIELD_NONE;
1274 
1275 	mutex_lock(&q->subdev_lock);
1276 	*mbus = fmt->format;
1277 	mutex_unlock(&q->subdev_lock);
1278 
1279 	return 0;
1280 }
1281 
1282 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1283 				      struct v4l2_subdev_state *sd_state,
1284 				      struct v4l2_subdev_mbus_code_enum *code)
1285 {
1286 	if (code->index >= ARRAY_SIZE(formats))
1287 		return -EINVAL;
1288 
1289 	code->code = formats[code->index].mbus_code;
1290 	return 0;
1291 }
1292 
1293 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1294 						struct v4l2_subdev_format *fmt)
1295 {
1296 	if (is_media_entity_v4l2_subdev(pad->entity)) {
1297 		struct v4l2_subdev *sd =
1298 			media_entity_to_v4l2_subdev(pad->entity);
1299 
1300 		memset(fmt, 0, sizeof(*fmt));
1301 		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1302 		fmt->pad = pad->index;
1303 		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1304 	}
1305 
1306 	return -EINVAL;
1307 }
1308 
1309 static int cio2_video_link_validate(struct media_link *link)
1310 {
1311 	struct media_entity *entity = link->sink->entity;
1312 	struct video_device *vd = media_entity_to_video_device(entity);
1313 	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1314 	struct cio2_device *cio2 = video_get_drvdata(vd);
1315 	struct device *dev = &cio2->pci_dev->dev;
1316 	struct v4l2_subdev_format source_fmt;
1317 	int ret;
1318 
1319 	if (!media_pad_remote_pad_first(entity->pads)) {
1320 		dev_info(dev, "video node %s pad not connected\n", vd->name);
1321 		return -ENOTCONN;
1322 	}
1323 
1324 	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1325 	if (ret < 0)
1326 		return 0;
1327 
1328 	if (source_fmt.format.width != q->format.width ||
1329 	    source_fmt.format.height != q->format.height) {
1330 		dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
1331 			q->format.width, q->format.height,
1332 			source_fmt.format.width, source_fmt.format.height);
1333 		return -EINVAL;
1334 	}
1335 
1336 	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1337 		return -EINVAL;
1338 
1339 	return 0;
1340 }
1341 
1342 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1343 	.subscribe_event = cio2_subdev_subscribe_event,
1344 	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
1345 };
1346 
1347 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1348 	.open = cio2_subdev_open,
1349 };
1350 
1351 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1352 	.link_validate = v4l2_subdev_link_validate_default,
1353 	.get_fmt = cio2_subdev_get_fmt,
1354 	.set_fmt = cio2_subdev_set_fmt,
1355 	.enum_mbus_code = cio2_subdev_enum_mbus_code,
1356 };
1357 
1358 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1359 	.core = &cio2_subdev_core_ops,
1360 	.pad = &cio2_subdev_pad_ops,
1361 };
1362 
1363 /******* V4L2 sub-device asynchronous registration callbacks***********/
1364 
1365 struct sensor_async_subdev {
1366 	struct v4l2_async_connection asd;
1367 	struct csi2_bus_info csi2;
1368 };
1369 
1370 #define to_sensor_asd(__asd)	\
1371 	container_of_const(__asd, struct sensor_async_subdev, asd)
1372 
1373 /* The .bound() notifier callback when a match is found */
1374 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1375 			       struct v4l2_subdev *sd,
1376 			       struct v4l2_async_connection *asd)
1377 {
1378 	struct cio2_device *cio2 = to_cio2_device(notifier);
1379 	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1380 	struct cio2_queue *q;
1381 	int ret;
1382 
1383 	if (cio2->queue[s_asd->csi2.port].sensor)
1384 		return -EBUSY;
1385 
1386 	ret = ipu_bridge_instantiate_vcm(sd->dev);
1387 	if (ret)
1388 		return ret;
1389 
1390 	q = &cio2->queue[s_asd->csi2.port];
1391 
1392 	q->csi2 = s_asd->csi2;
1393 	q->sensor = sd;
1394 	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1395 
1396 	return 0;
1397 }
1398 
1399 /* The .unbind callback */
1400 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1401 				 struct v4l2_subdev *sd,
1402 				 struct v4l2_async_connection *asd)
1403 {
1404 	struct cio2_device *cio2 = to_cio2_device(notifier);
1405 	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1406 
1407 	cio2->queue[s_asd->csi2.port].sensor = NULL;
1408 }
1409 
1410 /* .complete() is called after all subdevices have been located */
1411 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1412 {
1413 	struct cio2_device *cio2 = to_cio2_device(notifier);
1414 	struct sensor_async_subdev *s_asd;
1415 	struct v4l2_async_connection *asd;
1416 	struct cio2_queue *q;
1417 	int ret;
1418 
1419 	list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
1420 		s_asd = to_sensor_asd(asd);
1421 		q = &cio2->queue[s_asd->csi2.port];
1422 
1423 		ret = v4l2_create_fwnode_links_to_pad(asd->sd,
1424 						      &q->subdev_pads[CIO2_PAD_SINK], 0);
1425 		if (ret)
1426 			return ret;
1427 	}
1428 
1429 	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1430 }
1431 
1432 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1433 	.bound = cio2_notifier_bound,
1434 	.unbind = cio2_notifier_unbind,
1435 	.complete = cio2_notifier_complete,
1436 };
1437 
1438 static int cio2_parse_firmware(struct cio2_device *cio2)
1439 {
1440 	struct device *dev = &cio2->pci_dev->dev;
1441 	unsigned int i;
1442 	int ret;
1443 
1444 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
1445 		struct v4l2_fwnode_endpoint vep = {
1446 			.bus_type = V4L2_MBUS_CSI2_DPHY
1447 		};
1448 		struct sensor_async_subdev *s_asd;
1449 		struct fwnode_handle *ep;
1450 
1451 		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
1452 						FWNODE_GRAPH_ENDPOINT_NEXT);
1453 		if (!ep)
1454 			continue;
1455 
1456 		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1457 		if (ret)
1458 			goto err_parse;
1459 
1460 		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1461 							struct
1462 							sensor_async_subdev);
1463 		if (IS_ERR(s_asd)) {
1464 			ret = PTR_ERR(s_asd);
1465 			goto err_parse;
1466 		}
1467 
1468 		s_asd->csi2.port = vep.base.port;
1469 		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1470 
1471 		fwnode_handle_put(ep);
1472 
1473 		continue;
1474 
1475 err_parse:
1476 		fwnode_handle_put(ep);
1477 		return ret;
1478 	}
1479 
1480 	/*
1481 	 * Proceed even without sensors connected to allow the device to
1482 	 * suspend.
1483 	 */
1484 	cio2->notifier.ops = &cio2_async_ops;
1485 	ret = v4l2_async_nf_register(&cio2->notifier);
1486 	if (ret)
1487 		dev_err(dev, "failed to register async notifier: %d\n", ret);
1488 
1489 	return ret;
1490 }
1491 
1492 /**************** Queue initialization ****************/
1493 static const struct media_entity_operations cio2_media_ops = {
1494 	.link_validate = v4l2_subdev_link_validate,
1495 };
1496 
1497 static const struct media_entity_operations cio2_video_entity_ops = {
1498 	.link_validate = cio2_video_link_validate,
1499 };
1500 
1501 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1502 {
1503 	static const u32 default_width = 1936;
1504 	static const u32 default_height = 1096;
1505 	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1506 	struct device *dev = &cio2->pci_dev->dev;
1507 	struct video_device *vdev = &q->vdev;
1508 	struct vb2_queue *vbq = &q->vbq;
1509 	struct v4l2_subdev *subdev = &q->subdev;
1510 	struct v4l2_mbus_framefmt *fmt;
1511 	int r;
1512 
1513 	/* Initialize miscellaneous variables */
1514 	mutex_init(&q->lock);
1515 	mutex_init(&q->subdev_lock);
1516 
1517 	/* Initialize formats to default values */
1518 	fmt = &q->subdev_fmt;
1519 	fmt->width = default_width;
1520 	fmt->height = default_height;
1521 	fmt->code = dflt_fmt.mbus_code;
1522 	fmt->field = V4L2_FIELD_NONE;
1523 
1524 	q->format.width = default_width;
1525 	q->format.height = default_height;
1526 	q->format.pixelformat = dflt_fmt.fourcc;
1527 	q->format.colorspace = V4L2_COLORSPACE_RAW;
1528 	q->format.field = V4L2_FIELD_NONE;
1529 	q->format.num_planes = 1;
1530 	q->format.plane_fmt[0].bytesperline =
1531 				cio2_bytesperline(q->format.width);
1532 	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1533 						q->format.height;
1534 
1535 	/* Initialize fbpt */
1536 	r = cio2_fbpt_init(cio2, q);
1537 	if (r)
1538 		goto fail_fbpt;
1539 
1540 	/* Initialize media entities */
1541 	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1542 		MEDIA_PAD_FL_MUST_CONNECT;
1543 	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1544 	subdev->entity.ops = &cio2_media_ops;
1545 	subdev->internal_ops = &cio2_subdev_internal_ops;
1546 	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1547 	if (r) {
1548 		dev_err(dev, "failed to initialize subdev media entity (%d)\n", r);
1549 		goto fail_subdev_media_entity;
1550 	}
1551 
1552 	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1553 	vdev->entity.ops = &cio2_video_entity_ops;
1554 	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1555 	if (r) {
1556 		dev_err(dev, "failed to initialize videodev media entity (%d)\n",
1557 			r);
1558 		goto fail_vdev_media_entity;
1559 	}
1560 
1561 	/* Initialize subdev */
1562 	v4l2_subdev_init(subdev, &cio2_subdev_ops);
1563 	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1564 	subdev->owner = THIS_MODULE;
1565 	subdev->dev = dev;
1566 	snprintf(subdev->name, sizeof(subdev->name),
1567 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1568 	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1569 	v4l2_set_subdevdata(subdev, cio2);
1570 	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1571 	if (r) {
1572 		dev_err(dev, "failed to initialize subdev (%d)\n", r);
1573 		goto fail_subdev;
1574 	}

	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vbq->min_queued_buffers = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
	if (r) {
		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (r) {
		dev_err(dev, "failed to register video device (%d)\n", r);
		goto fail_vdev;
	}

	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	if (r)
		goto fail_link;

	return 0;

fail_link:
	vb2_video_unregister_device(&q->vdev);
fail_vdev:
	v4l2_device_unregister_subdev(subdev);
fail_subdev:
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, dev);
fail_fbpt:
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);

	return r;
}

static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	vb2_video_unregister_device(&q->vdev);
	media_entity_cleanup(&q->vdev.entity);
	v4l2_device_unregister_subdev(&q->subdev);
	media_entity_cleanup(&q->subdev.entity);
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);
}

static int cio2_queues_init(struct cio2_device *cio2)
{
	int i, r;

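	/* Set up all queues; on failure, tear down the ones already done */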
	for (i = 0; i < CIO2_QUEUES; i++) {
		r = cio2_queue_init(cio2, &cio2->queue[i]);
		if (r)
			break;
	}

	if (i == CIO2_QUEUES)
		return 0;

	for (i--; i >= 0; i--)
		cio2_queue_exit(cio2, &cio2->queue[i]);

	return r;
}

static void cio2_queues_exit(struct cio2_device *cio2)
{
	unsigned int i;

	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
}

/**************** PCI interface ****************/

static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct device *dev = &pci_dev->dev;
	struct cio2_device *cio2;
	int r;

	/*
	 * On some platforms no connections to sensors are defined in firmware;
	 * if the device has no endpoints then we can try to build those as
	 * software_nodes parsed from SSDB.
	 */
	r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	if (r)
		return r;

	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
	if (!cio2)
		return -ENOMEM;
	cio2->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(dev, "failed to remap I/O memory (%d)\n", r);
		return -ENODEV;
	}

	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];

	pci_set_drvdata(pci_dev, cio2);

	pci_set_master(pci_dev);

	r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
	if (r) {
		dev_err(dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

	r = pci_enable_msi(pci_dev);
	if (r) {
		dev_err(dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

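	/* Allocate the dummy page and LOP used to fill unused FBPT entries */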
	r = cio2_fbpt_init_dummy(cio2);
	if (r)
		return r;

	mutex_init(&cio2->lock);

	cio2->media_dev.dev = dev;
	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
		sizeof(cio2->media_dev.model));
	cio2->media_dev.hw_revision = 0;

	media_device_init(&cio2->media_dev);
	r = media_device_register(&cio2->media_dev);
	if (r < 0)
		goto fail_mutex_destroy;

	cio2->v4l2_dev.mdev = &cio2->media_dev;
	r = v4l2_device_register(dev, &cio2->v4l2_dev);
	if (r) {
		dev_err(dev, "failed to register V4L2 device (%d)\n", r);
		goto fail_media_device_unregister;
	}

	r = cio2_queues_init(cio2);
	if (r)
		goto fail_v4l2_device_unregister;

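	/* Prepare the async notifier used to bind the sensor subdevices */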
	v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);

	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
			     CIO2_NAME, cio2);
	if (r) {
		dev_err(dev, "failed to request IRQ (%d)\n", r);
		goto fail_clean_notifier;
	}

	/* Register notifier for the subdevices we care about */
	r = cio2_parse_firmware(cio2);
	if (r)
		goto fail_clean_notifier;

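	/* Allow the device to runtime-suspend when idle */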
	pm_runtime_put_noidle(dev);
	pm_runtime_allow(dev);

	return 0;

fail_clean_notifier:
	v4l2_async_nf_unregister(&cio2->notifier);
	v4l2_async_nf_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
	v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
	mutex_destroy(&cio2->lock);
	cio2_fbpt_exit_dummy(cio2);

	return r;
}

static void cio2_pci_remove(struct pci_dev *pci_dev)
{
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);

	media_device_unregister(&cio2->media_dev);
	v4l2_async_nf_unregister(&cio2->notifier);
	v4l2_async_nf_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
	cio2_fbpt_exit_dummy(cio2);
	v4l2_device_unregister(&cio2->v4l2_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);

	pm_runtime_forbid(&pci_dev->dev);
	pm_runtime_get_noresume(&pci_dev->dev);
}

static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;

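	/* Request the D0i3 low-power state */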
	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime suspend.\n");

	return 0;
}

static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;

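	/* Bring the device back from D0i3 (RR: restore requested) */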
	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime resume.\n");

	return 0;
}

/*
 * Helper function to advance all the elements of a circular buffer by "start"
 * positions
 */
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{
	struct {
		size_t begin, end;
	} arr[2] = {
		{ 0, start - 1 },
		{ start, elems - 1 },
	};

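	/* Number of elements in a chunk, both bounds inclusive */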
#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)

	/* Loop as long as we have out-of-place entries */
	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
		size_t size0, i;

		/*
		 * Find the number of entries that can be arranged on this
		 * iteration.
		 */
		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));

		/* Swap the entries in two parts of the array. */
		for (i = 0; i < size0; i++) {
			u8 *d = ptr + elem_size * (arr[1].begin + i);
			u8 *s = ptr + elem_size * (arr[0].begin + i);
			size_t j;

			for (j = 0; j < elem_size; j++)
				swap(d[j], s[j]);
		}

		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
			/* The end of the first array remains unarranged. */
			arr[0].begin += size0;
		} else {
			/*
			 * The first array is fully arranged so we proceed
			 * handling the next one.
			 */
			arr[0].begin = arr[1].begin;
			arr[0].end = arr[1].begin + size0 - 1;
			arr[1].begin += size0;
		}
	}
}

static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
	unsigned int i, j;

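	/* Find the first queued buffer, starting from bufs_first */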
	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
		if (q->bufs[j])
			break;

	if (i == CIO2_MAX_BUFFERS)
		return;

	if (j) {
		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
			CIO2_MAX_BUFFERS, j);
		arrange(q->bufs, sizeof(struct cio2_buffer *),
			CIO2_MAX_BUFFERS, j);
	}

	/*
	 * DMA clears the valid bit when accessing the buffer.
	 * When stopping the stream in the suspend callback, some of the
	 * buffers may be left in an invalid state. After resume, when the DMA
	 * encounters an invalid buffer, it will halt and stop receiving new
	 * data. To avoid the DMA halting, set the valid bit for all buffers
	 * in the FBPT.
	 */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}

static int __maybe_unused cio2_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 suspend\n");
	if (!cio2->streaming)
		return 0;

	/* Stop stream */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
	if (r) {
		dev_err(dev, "failed to stop sensor streaming\n");
		return r;
	}

	cio2_hw_exit(cio2, q);
	synchronize_irq(pci_dev->irq);

	pm_runtime_force_suspend(dev);

	/*
	 * Upon resume, the hardware starts processing the FBPT entries from
	 * the beginning, so relocate the queued buffers to the head of the
	 * FBPT before suspending.
	 */
	cio2_fbpt_rearrange(cio2, q);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}

static int __maybe_unused cio2_resume(struct device *dev)
{
	struct cio2_device *cio2 = dev_get_drvdata(dev);
	struct cio2_queue *q = cio2->cur_queue;
	int r;

	dev_dbg(dev, "cio2 resume\n");
	if (!cio2->streaming)
		return 0;
	/* Start stream */
	r = pm_runtime_force_resume(dev);
	if (r < 0) {
		dev_err(dev, "failed to set power (%d)\n", r);
		return r;
	}

	r = cio2_hw_init(cio2, q);
	if (r) {
		dev_err(dev, "failed to initialize cio2 hw\n");
		return r;
	}

	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r) {
		dev_err(dev, "failed to start sensor streaming\n");
		cio2_hw_exit(cio2, q);
	}

	return r;
}

static const struct dev_pm_ops cio2_pm_ops = {
	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};

static const struct pci_device_id cio2_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
	{ }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver = {
	.name = CIO2_NAME,
	.id_table = cio2_pci_id_table,
	.probe = cio2_pci_probe,
	.remove = cio2_pci_remove,
	.driver = {
		.pm = &cio2_pm_ops,
	},
};

module_pci_driver(cio2_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");
MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");