1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * isp.c
4  *
5  * TI OMAP3 ISP - Core
6  *
7  * Copyright (C) 2006-2010 Nokia Corporation
8  * Copyright (C) 2007-2009 Texas Instruments, Inc.
9  *
10  * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
11  *	     Sakari Ailus <sakari.ailus@iki.fi>
12  *
13  * Contributors:
14  *	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
15  *	Sakari Ailus <sakari.ailus@iki.fi>
16  *	David Cohen <dacohen@gmail.com>
17  *	Stanimir Varbanov <svarbanov@mm-sol.com>
18  *	Vimarsh Zutshi <vimarsh.zutshi@gmail.com>
19  *	Tuukka Toivonen <tuukkat76@gmail.com>
20  *	Sergio Aguirre <saaguirre@ti.com>
21  *	Antti Koskipaa <akoskipa@gmail.com>
22  *	Ivan T. Ivanov <iivanov@mm-sol.com>
23  *	RaniSuneela <r-m@ti.com>
24  *	Atanas Filipov <afilipov@mm-sol.com>
25  *	Gjorgji Rosikopulos <grosikopulos@mm-sol.com>
26  *	Hiroshi DOYU <hiroshi.doyu@nokia.com>
27  *	Nayden Kanchev <nkanchev@mm-sol.com>
28  *	Phil Carmody <ext-phil.2.carmody@nokia.com>
29  *	Artem Bityutskiy <artem.bityutskiy@nokia.com>
30  *	Dominic Curran <dcurran@ti.com>
31  *	Ilkka Myllyperkio <ilkka.myllyperkio@sofica.fi>
32  *	Pallavi Kulkarni <p-kulkarni@ti.com>
33  *	Vaibhav Hiremath <hvaibhav@ti.com>
34  *	Mohit Jalori <mjalori@ti.com>
35  *	Sameer Venkatraman <sameerv@ti.com>
36  *	Senthilvadivu Guruswamy <svadivu@ti.com>
37  *	Thara Gopinath <thara@ti.com>
38  *	Toni Leinonen <toni.leinonen@nokia.com>
39  *	Troy Laramy <t-laramy@ti.com>
40  */
41 
42 #include <linux/clk.h>
43 #include <linux/clkdev.h>
44 #include <linux/delay.h>
45 #include <linux/device.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/i2c.h>
48 #include <linux/interrupt.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/module.h>
51 #include <linux/omap-iommu.h>
52 #include <linux/platform_device.h>
53 #include <linux/property.h>
54 #include <linux/regulator/consumer.h>
55 #include <linux/slab.h>
56 #include <linux/sched.h>
57 #include <linux/vmalloc.h>
58 
59 #ifdef CONFIG_ARM_DMA_USE_IOMMU
60 #include <asm/dma-iommu.h>
61 #endif
62 
63 #include <media/v4l2-common.h>
64 #include <media/v4l2-fwnode.h>
65 #include <media/v4l2-device.h>
66 #include <media/v4l2-mc.h>
67 
68 #include "isp.h"
69 #include "ispreg.h"
70 #include "ispccdc.h"
71 #include "isppreview.h"
72 #include "ispresizer.h"
73 #include "ispcsi2.h"
74 #include "ispccp2.h"
75 #include "isph3a.h"
76 #include "isphist.h"
77 
78 static unsigned int autoidle;
79 module_param(autoidle, int, 0444);
80 MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
81 
82 static void isp_save_ctx(struct isp_device *isp);
83 
84 static void isp_restore_ctx(struct isp_device *isp);
85 
86 static const struct isp_res_mapping isp_res_maps[] = {
87 	{
88 		.isp_rev = ISP_REVISION_2_0,
89 		.offset = {
90 			/* first MMIO area */
91 			0x0000, /* base, len 0x0070 */
92 			0x0400, /* ccp2, len 0x01f0 */
93 			0x0600, /* ccdc, len 0x00a8 */
94 			0x0a00, /* hist, len 0x0048 */
95 			0x0c00, /* h3a, len 0x0060 */
96 			0x0e00, /* preview, len 0x00a0 */
97 			0x1000, /* resizer, len 0x00ac */
98 			0x1200, /* sbl, len 0x00fc */
99 			/* second MMIO area */
100 			0x0000, /* csi2a, len 0x0170 */
101 			0x0170, /* csiphy2, len 0x000c */
102 		},
103 		.phy_type = ISP_PHY_TYPE_3430,
104 	},
105 	{
106 		.isp_rev = ISP_REVISION_15_0,
107 		.offset = {
108 			/* first MMIO area */
109 			0x0000, /* base, len 0x0070 */
110 			0x0400, /* ccp2, len 0x01f0 */
111 			0x0600, /* ccdc, len 0x00a8 */
112 			0x0a00, /* hist, len 0x0048 */
113 			0x0c00, /* h3a, len 0x0060 */
114 			0x0e00, /* preview, len 0x00a0 */
115 			0x1000, /* resizer, len 0x00ac */
116 			0x1200, /* sbl, len 0x00fc */
117 			/* second MMIO area */
118 			0x0000, /* csi2a, len 0x0170 (1st area) */
119 			0x0170, /* csiphy2, len 0x000c */
120 			0x01c0, /* csi2a, len 0x0040 (2nd area) */
121 			0x0400, /* csi2c, len 0x0170 (1st area) */
122 			0x0570, /* csiphy1, len 0x000c */
123 			0x05c0, /* csi2c, len 0x0040 (2nd area) */
124 		},
125 		.phy_type = ISP_PHY_TYPE_3630,
126 	},
127 };
128 
129 /* Structure for saving/restoring ISP module registers */
130 static struct isp_reg isp_reg_list[] = {
131 	{OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0},
132 	{OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0},
133 	{OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0},
134 	{0, ISP_TOK_TERM, 0}
135 };
136 
137 /*
138  * omap3isp_flush - Post pending L3 bus writes by doing a register readback
139  * @isp: OMAP3 ISP device
140  *
141  * In order to force posting of pending writes, we need to write and
142  * read back the same register, in this case the revision register.
143  *
144  * See this link for reference:
145  *   https://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
146  */
147 void omap3isp_flush(struct isp_device *isp)
148 {
149 	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
150 	isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
151 }
152 
153 /* -----------------------------------------------------------------------------
154  * XCLK
155  */
156 
157 #define to_isp_xclk(_hw)	container_of(_hw, struct isp_xclk, hw)
158 
159 static void isp_xclk_update(struct isp_xclk *xclk, u32 divider)
160 {
161 	switch (xclk->id) {
162 	case ISP_XCLK_A:
163 		isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
164 				ISPTCTRL_CTRL_DIVA_MASK,
165 				divider << ISPTCTRL_CTRL_DIVA_SHIFT);
166 		break;
167 	case ISP_XCLK_B:
168 		isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
169 				ISPTCTRL_CTRL_DIVB_MASK,
170 				divider << ISPTCTRL_CTRL_DIVB_SHIFT);
171 		break;
172 	}
173 }
174 
175 static int isp_xclk_prepare(struct clk_hw *hw)
176 {
177 	struct isp_xclk *xclk = to_isp_xclk(hw);
178 
179 	omap3isp_get(xclk->isp);
180 
181 	return 0;
182 }
183 
184 static void isp_xclk_unprepare(struct clk_hw *hw)
185 {
186 	struct isp_xclk *xclk = to_isp_xclk(hw);
187 
188 	omap3isp_put(xclk->isp);
189 }
190 
191 static int isp_xclk_enable(struct clk_hw *hw)
192 {
193 	struct isp_xclk *xclk = to_isp_xclk(hw);
194 	unsigned long flags;
195 
196 	spin_lock_irqsave(&xclk->lock, flags);
197 	isp_xclk_update(xclk, xclk->divider);
198 	xclk->enabled = true;
199 	spin_unlock_irqrestore(&xclk->lock, flags);
200 
201 	return 0;
202 }
203 
204 static void isp_xclk_disable(struct clk_hw *hw)
205 {
206 	struct isp_xclk *xclk = to_isp_xclk(hw);
207 	unsigned long flags;
208 
209 	spin_lock_irqsave(&xclk->lock, flags);
210 	isp_xclk_update(xclk, 0);
211 	xclk->enabled = false;
212 	spin_unlock_irqrestore(&xclk->lock, flags);
213 }
214 
215 static unsigned long isp_xclk_recalc_rate(struct clk_hw *hw,
216 					  unsigned long parent_rate)
217 {
218 	struct isp_xclk *xclk = to_isp_xclk(hw);
219 
220 	return parent_rate / xclk->divider;
221 }
222 
223 static u32 isp_xclk_calc_divider(unsigned long *rate, unsigned long parent_rate)
224 {
225 	u32 divider;
226 
227 	if (*rate >= parent_rate) {
228 		*rate = parent_rate;
229 		return ISPTCTRL_CTRL_DIV_BYPASS;
230 	}
231 
232 	if (*rate == 0)
233 		*rate = 1;
234 
235 	divider = DIV_ROUND_CLOSEST(parent_rate, *rate);
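	/* Clamp the divider below the bypass value, which disables division. */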
236 	if (divider >= ISPTCTRL_CTRL_DIV_BYPASS)
237 		divider = ISPTCTRL_CTRL_DIV_BYPASS - 1;
238 
239 	*rate = parent_rate / divider;
240 	return divider;
241 }
242 
243 static long isp_xclk_round_rate(struct clk_hw *hw, unsigned long rate,
244 				unsigned long *parent_rate)
245 {
246 	isp_xclk_calc_divider(&rate, *parent_rate);
247 	return rate;
248 }
249 
250 static int isp_xclk_set_rate(struct clk_hw *hw, unsigned long rate,
251 			     unsigned long parent_rate)
252 {
253 	struct isp_xclk *xclk = to_isp_xclk(hw);
254 	unsigned long flags;
255 	u32 divider;
256 
257 	divider = isp_xclk_calc_divider(&rate, parent_rate);
258 
259 	spin_lock_irqsave(&xclk->lock, flags);
260 
261 	xclk->divider = divider;
262 	if (xclk->enabled)
263 		isp_xclk_update(xclk, divider);
264 
265 	spin_unlock_irqrestore(&xclk->lock, flags);
266 
267 	dev_dbg(xclk->isp->dev, "%s: cam_xclk%c set to %lu Hz (div %u)\n",
268 		__func__, xclk->id == ISP_XCLK_A ? 'a' : 'b', rate, divider);
269 	return 0;
270 }
271 
272 static const struct clk_ops isp_xclk_ops = {
273 	.prepare = isp_xclk_prepare,
274 	.unprepare = isp_xclk_unprepare,
275 	.enable = isp_xclk_enable,
276 	.disable = isp_xclk_disable,
277 	.recalc_rate = isp_xclk_recalc_rate,
278 	.round_rate = isp_xclk_round_rate,
279 	.set_rate = isp_xclk_set_rate,
280 };
281 
282 static const char *isp_xclk_parent_name = "cam_mclk";
283 
284 static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
285 {
286 	unsigned int idx = clkspec->args[0];
287 	struct isp_device *isp = data;
288 
289 	if (idx >= ARRAY_SIZE(isp->xclks))
290 		return ERR_PTR(-ENOENT);
291 
292 	return isp->xclks[idx].clk;
293 }
294 
295 static int isp_xclk_init(struct isp_device *isp)
296 {
297 	struct device_node *np = isp->dev->of_node;
298 	struct clk_init_data init = {};
299 	unsigned int i;
300 
301 	for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
302 		isp->xclks[i].clk = ERR_PTR(-EINVAL);
303 
304 	for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
305 		struct isp_xclk *xclk = &isp->xclks[i];
306 
307 		xclk->isp = isp;
308 		xclk->id = i == 0 ? ISP_XCLK_A : ISP_XCLK_B;
309 		xclk->divider = 1;
310 		spin_lock_init(&xclk->lock);
311 
312 		init.name = i == 0 ? "cam_xclka" : "cam_xclkb";
313 		init.ops = &isp_xclk_ops;
314 		init.parent_names = &isp_xclk_parent_name;
315 		init.num_parents = 1;
316 
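		/*
		 * The common clock framework copies what it needs from the
		 * init data during clk_register(), so reusing this on-stack
		 * structure across loop iterations is safe.
		 */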
317 		xclk->hw.init = &init;
318 		/*
319 		 * The first argument is NULL in order to avoid circular
320 		 * reference, as this driver takes reference on the
321 		 * sensor subdevice modules and the sensors would take
322 		 * reference on this module through clk_get().
323 		 */
324 		xclk->clk = clk_register(NULL, &xclk->hw);
325 		if (IS_ERR(xclk->clk))
326 			return PTR_ERR(xclk->clk);
327 	}
328 
329 	if (np)
330 		of_clk_add_provider(np, isp_xclk_src_get, isp);
331 
332 	return 0;
333 }
334 
335 static void isp_xclk_cleanup(struct isp_device *isp)
336 {
337 	struct device_node *np = isp->dev->of_node;
338 	unsigned int i;
339 
340 	if (np)
341 		of_clk_del_provider(np);
342 
343 	for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
344 		struct isp_xclk *xclk = &isp->xclks[i];
345 
346 		if (!IS_ERR(xclk->clk))
347 			clk_unregister(xclk->clk);
348 	}
349 }
350 
351 /* -----------------------------------------------------------------------------
352  * Interrupts
353  */
354 
355 /*
356  * isp_enable_interrupts - Enable ISP interrupts.
357  * @isp: OMAP3 ISP device
358  */
359 static void isp_enable_interrupts(struct isp_device *isp)
360 {
361 	static const u32 irq = IRQ0ENABLE_CSIA_IRQ
362 			     | IRQ0ENABLE_CSIB_IRQ
363 			     | IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ
364 			     | IRQ0ENABLE_CCDC_LSC_DONE_IRQ
365 			     | IRQ0ENABLE_CCDC_VD0_IRQ
366 			     | IRQ0ENABLE_CCDC_VD1_IRQ
367 			     | IRQ0ENABLE_HS_VS_IRQ
368 			     | IRQ0ENABLE_HIST_DONE_IRQ
369 			     | IRQ0ENABLE_H3A_AWB_DONE_IRQ
370 			     | IRQ0ENABLE_H3A_AF_DONE_IRQ
371 			     | IRQ0ENABLE_PRV_DONE_IRQ
372 			     | IRQ0ENABLE_RSZ_DONE_IRQ;
373 
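	/* Clear any stale status bits before unmasking the interrupt sources. */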
374 	isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
375 	isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
376 }
377 
378 /*
379  * isp_disable_interrupts - Disable ISP interrupts.
380  * @isp: OMAP3 ISP device
381  */
382 static void isp_disable_interrupts(struct isp_device *isp)
383 {
384 	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
385 }
386 
387 /*
388  * isp_core_init - ISP core settings
389  * @isp: OMAP3 ISP device
390  * @idle: Consider idle state.
391  *
392  * Set the power settings for the ISP and SBL bus and configure the HS/VS
393  * interrupt source.
394  *
395  * We need to configure the HS/VS interrupt source before interrupts get
396  * enabled, as the sensor might be free-running and the ISP default setting
397  * (HS edge) would put an unnecessary burden on the CPU.
398  */
399 static void isp_core_init(struct isp_device *isp, int idle)
400 {
401 	isp_reg_writel(isp,
402 		       ((idle ? ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY :
403 				ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY) <<
404 			ISP_SYSCONFIG_MIDLEMODE_SHIFT) |
405 			((isp->revision == ISP_REVISION_15_0) ?
406 			  ISP_SYSCONFIG_AUTOIDLE : 0),
407 		       OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
408 
409 	isp_reg_writel(isp,
410 		       (isp->autoidle ? ISPCTRL_SBL_AUTOIDLE : 0) |
411 		       ISPCTRL_SYNC_DETECT_VSRISE,
412 		       OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
413 }
414 
415 /*
416  * Configure the bridge and lane shifter. Valid inputs are
417  *
418  * CCDC_INPUT_PARALLEL: Parallel interface
419  * CCDC_INPUT_CSI2A: CSI2a receiver
420  * CCDC_INPUT_CCP2B: CCP2b receiver
421  * CCDC_INPUT_CSI2C: CSI2c receiver
422  *
423  * The bridge and lane shifter are configured according to the selected input
424  * and the ISP platform data.
425  */
426 void omap3isp_configure_bridge(struct isp_device *isp,
427 			       enum ccdc_input_entity input,
428 			       const struct isp_parallel_cfg *parcfg,
429 			       unsigned int shift, unsigned int bridge)
430 {
431 	u32 ispctrl_val;
432 
433 	ispctrl_val  = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
434 	ispctrl_val &= ~ISPCTRL_SHIFT_MASK;
435 	ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV;
436 	ispctrl_val &= ~ISPCTRL_PAR_SER_CLK_SEL_MASK;
437 	ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_MASK;
438 	ispctrl_val |= bridge;
439 
440 	switch (input) {
441 	case CCDC_INPUT_PARALLEL:
442 		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
443 		ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
444 		shift += parcfg->data_lane_shift;
445 		break;
446 
447 	case CCDC_INPUT_CSI2A:
448 		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA;
449 		break;
450 
451 	case CCDC_INPUT_CCP2B:
452 		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB;
453 		break;
454 
455 	case CCDC_INPUT_CSI2C:
456 		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIC;
457 		break;
458 
459 	default:
460 		return;
461 	}
462 
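	/* The SHIFT field encodes the lane shift in steps of two bits, hence the division. */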
463 	ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK;
464 
465 	isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
466 }
467 
468 void omap3isp_hist_dma_done(struct isp_device *isp)
469 {
470 	if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
471 	    omap3isp_stat_pcr_busy(&isp->isp_hist)) {
472 		/* Histogram cannot be enabled in this frame anymore */
473 		atomic_set(&isp->isp_hist.buf_err, 1);
474 		dev_dbg(isp->dev,
475 			"hist: Out of synchronization with CCDC. Ignoring next buffer.\n");
476 	}
477 }
478 
479 static inline void __maybe_unused isp_isr_dbg(struct isp_device *isp,
480 					      u32 irqstatus)
481 {
482 	static const char *name[] = {
483 		"CSIA_IRQ",
484 		"res1",
485 		"res2",
486 		"CSIB_LCM_IRQ",
487 		"CSIB_IRQ",
488 		"res5",
489 		"res6",
490 		"res7",
491 		"CCDC_VD0_IRQ",
492 		"CCDC_VD1_IRQ",
493 		"CCDC_VD2_IRQ",
494 		"CCDC_ERR_IRQ",
495 		"H3A_AF_DONE_IRQ",
496 		"H3A_AWB_DONE_IRQ",
497 		"res14",
498 		"res15",
499 		"HIST_DONE_IRQ",
500 		"CCDC_LSC_DONE",
501 		"CCDC_LSC_PREFETCH_COMPLETED",
502 		"CCDC_LSC_PREFETCH_ERROR",
503 		"PRV_DONE_IRQ",
504 		"CBUFF_IRQ",
505 		"res22",
506 		"res23",
507 		"RSZ_DONE_IRQ",
508 		"OVF_IRQ",
509 		"res26",
510 		"res27",
511 		"MMU_ERR_IRQ",
512 		"OCP_ERR_IRQ",
513 		"SEC_ERR_IRQ",
514 		"HS_VS_IRQ",
515 	};
516 	int i;
517 
518 	dev_dbg(isp->dev, "ISP IRQ: ");
519 
520 	for (i = 0; i < ARRAY_SIZE(name); i++) {
521 		if ((1 << i) & irqstatus)
522 			printk(KERN_CONT "%s ", name[i]);
523 	}
524 	printk(KERN_CONT "\n");
525 }
526 
527 static void isp_isr_sbl(struct isp_device *isp)
528 {
529 	struct device *dev = isp->dev;
530 	struct isp_pipeline *pipe;
531 	u32 sbl_pcr;
532 
533 	/*
534 	 * Handle shared buffer logic overflows for video buffers.
535 	 * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored.
536 	 */
537 	sbl_pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
538 	isp_reg_writel(isp, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
539 	sbl_pcr &= ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF;
540 
541 	if (sbl_pcr)
542 		dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);
543 
544 	if (sbl_pcr & ISPSBL_PCR_CSIB_WBL_OVF) {
545 		pipe = to_isp_pipeline(&isp->isp_ccp2.subdev.entity);
546 		if (pipe != NULL)
547 			pipe->error = true;
548 	}
549 
550 	if (sbl_pcr & ISPSBL_PCR_CSIA_WBL_OVF) {
551 		pipe = to_isp_pipeline(&isp->isp_csi2a.subdev.entity);
552 		if (pipe != NULL)
553 			pipe->error = true;
554 	}
555 
556 	if (sbl_pcr & ISPSBL_PCR_CCDC_WBL_OVF) {
557 		pipe = to_isp_pipeline(&isp->isp_ccdc.subdev.entity);
558 		if (pipe != NULL)
559 			pipe->error = true;
560 	}
561 
562 	if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
563 		pipe = to_isp_pipeline(&isp->isp_prev.subdev.entity);
564 		if (pipe != NULL)
565 			pipe->error = true;
566 	}
567 
568 	if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
569 		       | ISPSBL_PCR_RSZ2_WBL_OVF
570 		       | ISPSBL_PCR_RSZ3_WBL_OVF
571 		       | ISPSBL_PCR_RSZ4_WBL_OVF)) {
572 		pipe = to_isp_pipeline(&isp->isp_res.subdev.entity);
573 		if (pipe != NULL)
574 			pipe->error = true;
575 	}
576 
577 	if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
578 		omap3isp_stat_sbl_overflow(&isp->isp_af);
579 
580 	if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF)
581 		omap3isp_stat_sbl_overflow(&isp->isp_aewb);
582 }
583 
584 /*
585  * isp_isr - Interrupt Service Routine for Camera ISP module.
586  * @irq: Not used currently.
587  * @_isp: Pointer to the OMAP3 ISP device
588  *
589  * Handles the corresponding callback if plugged in.
590  */
591 static irqreturn_t isp_isr(int irq, void *_isp)
592 {
593 	static const u32 ccdc_events = IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ |
594 				       IRQ0STATUS_CCDC_LSC_DONE_IRQ |
595 				       IRQ0STATUS_CCDC_VD0_IRQ |
596 				       IRQ0STATUS_CCDC_VD1_IRQ |
597 				       IRQ0STATUS_HS_VS_IRQ;
598 	struct isp_device *isp = _isp;
599 	u32 irqstatus;
600 
601 	irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
602 	isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
603 
604 	isp_isr_sbl(isp);
605 
606 	if (irqstatus & IRQ0STATUS_CSIA_IRQ)
607 		omap3isp_csi2_isr(&isp->isp_csi2a);
608 
609 	if (irqstatus & IRQ0STATUS_CSIB_IRQ)
610 		omap3isp_ccp2_isr(&isp->isp_ccp2);
611 
612 	if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
613 		if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
614 			omap3isp_preview_isr_frame_sync(&isp->isp_prev);
615 		if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
616 			omap3isp_resizer_isr_frame_sync(&isp->isp_res);
617 		omap3isp_stat_isr_frame_sync(&isp->isp_aewb);
618 		omap3isp_stat_isr_frame_sync(&isp->isp_af);
619 		omap3isp_stat_isr_frame_sync(&isp->isp_hist);
620 	}
621 
622 	if (irqstatus & ccdc_events)
623 		omap3isp_ccdc_isr(&isp->isp_ccdc, irqstatus & ccdc_events);
624 
625 	if (irqstatus & IRQ0STATUS_PRV_DONE_IRQ) {
626 		if (isp->isp_prev.output & PREVIEW_OUTPUT_RESIZER)
627 			omap3isp_resizer_isr_frame_sync(&isp->isp_res);
628 		omap3isp_preview_isr(&isp->isp_prev);
629 	}
630 
631 	if (irqstatus & IRQ0STATUS_RSZ_DONE_IRQ)
632 		omap3isp_resizer_isr(&isp->isp_res);
633 
634 	if (irqstatus & IRQ0STATUS_H3A_AWB_DONE_IRQ)
635 		omap3isp_stat_isr(&isp->isp_aewb);
636 
637 	if (irqstatus & IRQ0STATUS_H3A_AF_DONE_IRQ)
638 		omap3isp_stat_isr(&isp->isp_af);
639 
640 	if (irqstatus & IRQ0STATUS_HIST_DONE_IRQ)
641 		omap3isp_stat_isr(&isp->isp_hist);
642 
643 	omap3isp_flush(isp);
644 
645 #if defined(DEBUG) && defined(ISP_ISR_DEBUG)
646 	isp_isr_dbg(isp, irqstatus);
647 #endif
648 
649 	return IRQ_HANDLED;
650 }
651 
652 static const struct media_device_ops isp_media_ops = {
653 	.link_notify = v4l2_pipeline_link_notify,
654 };
655 
656 /* -----------------------------------------------------------------------------
657  * Pipeline stream management
658  */
659 
660 /*
661  * isp_pipeline_enable - Enable streaming on a pipeline
662  * @pipe: ISP pipeline
663  * @mode: Stream mode (single shot or continuous)
664  *
665  * Walk the entities chain starting at the pipeline output video node and start
666  * all modules in the chain in the given mode.
667  *
668  * Return 0 if successful, or the return value of the failed video::s_stream
669  * operation otherwise.
670  */
671 static int isp_pipeline_enable(struct isp_pipeline *pipe,
672 			       enum isp_pipeline_stream_state mode)
673 {
674 	struct isp_device *isp = pipe->output->isp;
675 	struct media_entity *entity;
676 	struct media_pad *pad;
677 	struct v4l2_subdev *subdev;
678 	unsigned long flags;
679 	int ret;
680 
681 	/* Refuse to start streaming if an entity included in the pipeline has
682 	 * crashed. This check must be performed before the loop below to avoid
683 	 * starting entities if the pipeline won't start anyway (those entities
684 	 * would then likely fail to stop, making the problem worse).
685 	 */
686 	if (media_entity_enum_intersects(&pipe->ent_enum, &isp->crashed))
687 		return -EIO;
688 
689 	spin_lock_irqsave(&pipe->lock, flags);
690 	pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
691 	spin_unlock_irqrestore(&pipe->lock, flags);
692 
693 	pipe->do_propagation = false;
694 
695 	mutex_lock(&isp->media_dev.graph_mutex);
696 
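	/*
	 * Walk the pipeline upstream from the output video node, following the
	 * sink pads, and start each sub-device on the way.
	 */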
697 	entity = &pipe->output->video.entity;
698 	while (1) {
699 		pad = &entity->pads[0];
700 		if (!(pad->flags & MEDIA_PAD_FL_SINK))
701 			break;
702 
703 		pad = media_pad_remote_pad_first(pad);
704 		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
705 			break;
706 
707 		entity = pad->entity;
708 		subdev = media_entity_to_v4l2_subdev(entity);
709 
710 		ret = v4l2_subdev_call(subdev, video, s_stream, mode);
711 		if (ret < 0 && ret != -ENOIOCTLCMD) {
712 			mutex_unlock(&isp->media_dev.graph_mutex);
713 			return ret;
714 		}
715 
716 		if (subdev == &isp->isp_ccdc.subdev) {
717 			v4l2_subdev_call(&isp->isp_aewb.subdev, video,
718 					s_stream, mode);
719 			v4l2_subdev_call(&isp->isp_af.subdev, video,
720 					s_stream, mode);
721 			v4l2_subdev_call(&isp->isp_hist.subdev, video,
722 					s_stream, mode);
723 			pipe->do_propagation = true;
724 		}
725 
726 		/* Stop at the first external sub-device. */
727 		if (subdev->dev != isp->dev)
728 			break;
729 	}
730 
731 	mutex_unlock(&isp->media_dev.graph_mutex);
732 
733 	return 0;
734 }
735 
736 static int isp_pipeline_wait_resizer(struct isp_device *isp)
737 {
738 	return omap3isp_resizer_busy(&isp->isp_res);
739 }
740 
741 static int isp_pipeline_wait_preview(struct isp_device *isp)
742 {
743 	return omap3isp_preview_busy(&isp->isp_prev);
744 }
745 
746 static int isp_pipeline_wait_ccdc(struct isp_device *isp)
747 {
748 	return omap3isp_stat_busy(&isp->isp_af)
749 	    || omap3isp_stat_busy(&isp->isp_aewb)
750 	    || omap3isp_stat_busy(&isp->isp_hist)
751 	    || omap3isp_ccdc_busy(&isp->isp_ccdc);
752 }
753 
754 #define ISP_STOP_TIMEOUT	msecs_to_jiffies(1000)
755 
756 static int isp_pipeline_wait(struct isp_device *isp,
757 			     int(*busy)(struct isp_device *isp))
758 {
759 	unsigned long timeout = jiffies + ISP_STOP_TIMEOUT;
760 
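	/* Busy-wait without sleeping until the module reports idle or the timeout expires. */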
761 	while (!time_after(jiffies, timeout)) {
762 		if (!busy(isp))
763 			return 0;
764 	}
765 
766 	return 1;
767 }
768 
769 /*
770  * isp_pipeline_disable - Disable streaming on a pipeline
771  * @pipe: ISP pipeline
772  *
773  * Walk the entities chain starting at the pipeline output video node and stop
774  * all modules in the chain. Wait synchronously for the modules to be stopped if
775  * necessary.
776  *
777  * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
778  * can't be stopped (in which case a software reset of the ISP is probably
779  * necessary).
780  */
781 static int isp_pipeline_disable(struct isp_pipeline *pipe)
782 {
783 	struct isp_device *isp = pipe->output->isp;
784 	struct media_entity *entity;
785 	struct media_pad *pad;
786 	struct v4l2_subdev *subdev;
787 	int failure = 0;
788 	int ret;
789 
790 	/*
791 	 * We need to stop all the modules after CCDC first or they'll
792 	 * never stop since they may not get a full frame from CCDC.
793 	 */
794 	entity = &pipe->output->video.entity;
795 	while (1) {
796 		pad = &entity->pads[0];
797 		if (!(pad->flags & MEDIA_PAD_FL_SINK))
798 			break;
799 
800 		pad = media_pad_remote_pad_first(pad);
801 		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
802 			break;
803 
804 		entity = pad->entity;
805 		subdev = media_entity_to_v4l2_subdev(entity);
806 
807 		if (subdev == &isp->isp_ccdc.subdev) {
808 			v4l2_subdev_call(&isp->isp_aewb.subdev,
809 					 video, s_stream, 0);
810 			v4l2_subdev_call(&isp->isp_af.subdev,
811 					 video, s_stream, 0);
812 			v4l2_subdev_call(&isp->isp_hist.subdev,
813 					 video, s_stream, 0);
814 		}
815 
816 		ret = v4l2_subdev_call(subdev, video, s_stream, 0);
817 
818 		/* Stop at the first external sub-device. */
819 		if (subdev->dev != isp->dev)
820 			break;
821 
822 		if (subdev == &isp->isp_res.subdev)
823 			ret |= isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
824 		else if (subdev == &isp->isp_prev.subdev)
825 			ret |= isp_pipeline_wait(isp, isp_pipeline_wait_preview);
826 		else if (subdev == &isp->isp_ccdc.subdev)
827 			ret |= isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
828 
829 		/* Handle stop failures. An entity that fails to stop can
830 		 * usually just be restarted. Flag the stop failure nonetheless
831 		 * to trigger an ISP reset the next time the device is released,
832 		 * just in case.
833 		 *
834 		 * The preview engine is a special case. A failure to stop can
835 		 * mean a hardware crash. When that happens the preview engine
836 		 * won't respond to read/write operations on the L4 bus anymore,
837 		 * resulting in a bus fault and a kernel oops next time it gets
838 		 * accessed. Mark it as crashed to prevent pipelines including
839 		 * it from being started.
840 		 */
841 		if (ret) {
842 			dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
843 			isp->stop_failure = true;
844 			if (subdev == &isp->isp_prev.subdev)
845 				media_entity_enum_set(&isp->crashed,
846 						      &subdev->entity);
847 			failure = -ETIMEDOUT;
848 		}
849 	}
850 
851 	return failure;
852 }
853 
854 /*
855  * omap3isp_pipeline_set_stream - Enable/disable streaming on a pipeline
856  * @pipe: ISP pipeline
857  * @state: Stream state (stopped, single shot or continuous)
858  *
859  * Set the pipeline to the given stream state. Pipelines can be started in
860  * single-shot or continuous mode.
861  *
862  * Return 0 if successful, or the return value of the failed video::s_stream
863  * operation otherwise. The pipeline state is not updated when the operation
864  * fails, except when stopping the pipeline.
865  */
866 int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
867 				 enum isp_pipeline_stream_state state)
868 {
869 	int ret;
870 
871 	if (state == ISP_PIPELINE_STREAM_STOPPED)
872 		ret = isp_pipeline_disable(pipe);
873 	else
874 		ret = isp_pipeline_enable(pipe, state);
875 
876 	if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED)
877 		pipe->stream_state = state;
878 
879 	return ret;
880 }
881 
882 /*
883  * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline
884  * @pipe: ISP pipeline
885  *
886  * Cancelling a stream marks all buffers on all video nodes in the pipeline as
887  * erroneous and makes sure no new buffer can be queued. This function is called
888  * when a fatal error that prevents any further operation on the pipeline
889  * occurs.
890  */
891 void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe)
892 {
893 	if (pipe->input)
894 		omap3isp_video_cancel_stream(pipe->input);
895 	if (pipe->output)
896 		omap3isp_video_cancel_stream(pipe->output);
897 }
898 
899 /*
900  * isp_pipeline_resume - Resume streaming on a pipeline
901  * @pipe: ISP pipeline
902  *
903  * Resume video output and input and re-enable pipeline.
904  */
905 static void isp_pipeline_resume(struct isp_pipeline *pipe)
906 {
907 	int singleshot = pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT;
908 
909 	omap3isp_video_resume(pipe->output, !singleshot);
910 	if (singleshot)
911 		omap3isp_video_resume(pipe->input, 0);
912 	isp_pipeline_enable(pipe, pipe->stream_state);
913 }
914 
915 /*
916  * isp_pipeline_suspend - Suspend streaming on a pipeline
917  * @pipe: ISP pipeline
918  *
919  * Suspend pipeline.
920  */
921 static void isp_pipeline_suspend(struct isp_pipeline *pipe)
922 {
923 	isp_pipeline_disable(pipe);
924 }
925 
926 /*
927  * isp_pipeline_is_last - Verify if entity has an enabled link to the output
928  *			  video node
929  * @me: ISP module's media entity
930  *
931  * Returns 1 if the entity has an enabled link to the output video node or 0
932  * otherwise. This holds only as long as a pipeline can have no more than one
933  * output node.
934  */
935 static int isp_pipeline_is_last(struct media_entity *me)
936 {
937 	struct isp_pipeline *pipe;
938 	struct media_pad *pad;
939 
940 	pipe = to_isp_pipeline(me);
941 	if (!pipe || pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED)
942 		return 0;
943 	pad = media_pad_remote_pad_first(&pipe->output->pad);
944 	return pad->entity == me;
945 }
946 
947 /*
948  * isp_suspend_module_pipeline - Suspend pipeline to which belongs the module
949  * @me: ISP module's media entity
950  *
951  * Suspend the whole pipeline if the module's entity has an enabled link to the
952  * output video node. This only works as long as a pipeline can have no more
953  * than one output node.
954  */
955 static void isp_suspend_module_pipeline(struct media_entity *me)
956 {
957 	if (isp_pipeline_is_last(me))
958 		isp_pipeline_suspend(to_isp_pipeline(me));
959 }
960 
961 /*
962  * isp_resume_module_pipeline - Resume pipeline to which belongs the module
963  * @me: ISP module's media entity
964  *
965  * Resume the whole pipeline if the module's entity has an enabled link to the
966  * output video node. This only works as long as a pipeline can have no more
967  * than one output node.
968  */
969 static void isp_resume_module_pipeline(struct media_entity *me)
970 {
971 	if (isp_pipeline_is_last(me))
972 		isp_pipeline_resume(to_isp_pipeline(me));
973 }
974 
975 /*
976  * isp_suspend_modules - Suspend ISP submodules.
977  * @isp: OMAP3 ISP device
978  *
979  * Returns 0 if suspend left all the submodules properly idle, or 1 if a
980  * general reset is required to suspend the submodules.
981  */
982 static int __maybe_unused isp_suspend_modules(struct isp_device *isp)
983 {
984 	unsigned long timeout;
985 
986 	omap3isp_stat_suspend(&isp->isp_aewb);
987 	omap3isp_stat_suspend(&isp->isp_af);
988 	omap3isp_stat_suspend(&isp->isp_hist);
989 	isp_suspend_module_pipeline(&isp->isp_res.subdev.entity);
990 	isp_suspend_module_pipeline(&isp->isp_prev.subdev.entity);
991 	isp_suspend_module_pipeline(&isp->isp_ccdc.subdev.entity);
992 	isp_suspend_module_pipeline(&isp->isp_csi2a.subdev.entity);
993 	isp_suspend_module_pipeline(&isp->isp_ccp2.subdev.entity);
994 
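	/*
	 * Wait for the statistics engines, preview, resizer and CCDC to become
	 * idle, polling roughly once per millisecond until the timeout expires.
	 */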
995 	timeout = jiffies + ISP_STOP_TIMEOUT;
996 	while (omap3isp_stat_busy(&isp->isp_af)
997 	    || omap3isp_stat_busy(&isp->isp_aewb)
998 	    || omap3isp_stat_busy(&isp->isp_hist)
999 	    || omap3isp_preview_busy(&isp->isp_prev)
1000 	    || omap3isp_resizer_busy(&isp->isp_res)
1001 	    || omap3isp_ccdc_busy(&isp->isp_ccdc)) {
1002 		if (time_after(jiffies, timeout)) {
1003 			dev_info(isp->dev, "can't stop modules.\n");
1004 			return 1;
1005 		}
1006 		msleep(1);
1007 	}
1008 
1009 	return 0;
1010 }
1011 
1012 /*
1013  * isp_resume_modules - Resume ISP submodules.
1014  * @isp: OMAP3 ISP device
1015  */
1016 static void __maybe_unused isp_resume_modules(struct isp_device *isp)
1017 {
1018 	omap3isp_stat_resume(&isp->isp_aewb);
1019 	omap3isp_stat_resume(&isp->isp_af);
1020 	omap3isp_stat_resume(&isp->isp_hist);
1021 	isp_resume_module_pipeline(&isp->isp_res.subdev.entity);
1022 	isp_resume_module_pipeline(&isp->isp_prev.subdev.entity);
1023 	isp_resume_module_pipeline(&isp->isp_ccdc.subdev.entity);
1024 	isp_resume_module_pipeline(&isp->isp_csi2a.subdev.entity);
1025 	isp_resume_module_pipeline(&isp->isp_ccp2.subdev.entity);
1026 }
1027 
1028 /*
1029  * isp_reset - Reset ISP with a timeout wait for idle.
1030  * @isp: OMAP3 ISP device
1031  */
1032 static int isp_reset(struct isp_device *isp)
1033 {
1034 	unsigned long timeout = 0;
1035 
1036 	isp_reg_writel(isp,
1037 		       isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG)
1038 		       | ISP_SYSCONFIG_SOFTRESET,
1039 		       OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);
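	/*
	 * Poll the reset-done bit in SYSSTATUS; with udelay(1) per iteration
	 * this amounts to a timeout of roughly 10 ms.
	 */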
1040 	while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN,
1041 			       ISP_SYSSTATUS) & 0x1)) {
1042 		if (timeout++ > 10000) {
1043 			dev_alert(isp->dev, "cannot reset ISP\n");
1044 			return -ETIMEDOUT;
1045 		}
1046 		udelay(1);
1047 	}
1048 
1049 	isp->stop_failure = false;
1050 	media_entity_enum_zero(&isp->crashed);
1051 	return 0;
1052 }
1053 
1054 /*
1055  * isp_save_context - Saves the values of the ISP module registers.
1056  * @isp: OMAP3 ISP device
1057  * @reg_list: Structure containing pairs of register address and value to
1058  *            save.
1059  */
1060 static void
1061 isp_save_context(struct isp_device *isp, struct isp_reg *reg_list)
1062 {
1063 	struct isp_reg *next = reg_list;
1064 
1065 	for (; next->reg != ISP_TOK_TERM; next++)
1066 		next->val = isp_reg_readl(isp, next->mmio_range, next->reg);
1067 }
1068 
1069 /*
1070  * isp_restore_context - Restores the values of the ISP module registers.
1071  * @isp: OMAP3 ISP device
1072  * @reg_list: Structure containing pairs of register address and value to
1073  *            modify on OMAP.
1074  */
1075 static void
1076 isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
1077 {
1078 	struct isp_reg *next = reg_list;
1079 
1080 	for (; next->reg != ISP_TOK_TERM; next++)
1081 		isp_reg_writel(isp, next->val, next->mmio_range, next->reg);
1082 }
1083 
1084 /*
1085  * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
1086  * @isp: OMAP3 ISP device
1087  *
1088  * Routine for saving the context of each module in the ISP.
1089  * CCDC, HIST, H3A, PREV, RESZ and MMU.
1090  */
1091 static void isp_save_ctx(struct isp_device *isp)
1092 {
1093 	isp_save_context(isp, isp_reg_list);
1094 	omap_iommu_save_ctx(isp->dev);
1095 }
1096 
1097 /*
1098  * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context.
1099  * @isp: OMAP3 ISP device
1100  *
1101  * Routine for restoring the context of each module in the ISP.
1102  * CCDC, HIST, H3A, PREV, RESZ and MMU.
1103  */
1104 static void isp_restore_ctx(struct isp_device *isp)
1105 {
1106 	isp_restore_context(isp, isp_reg_list);
1107 	omap_iommu_restore_ctx(isp->dev);
1108 	omap3isp_ccdc_restore_context(isp);
1109 	omap3isp_preview_restore_context(isp);
1110 }
1111 
1112 /* -----------------------------------------------------------------------------
1113  * SBL resources management
1114  */
1115 #define OMAP3_ISP_SBL_READ	(OMAP3_ISP_SBL_CSI1_READ | \
1116 				 OMAP3_ISP_SBL_CCDC_LSC_READ | \
1117 				 OMAP3_ISP_SBL_PREVIEW_READ | \
1118 				 OMAP3_ISP_SBL_RESIZER_READ)
1119 #define OMAP3_ISP_SBL_WRITE	(OMAP3_ISP_SBL_CSI1_WRITE | \
1120 				 OMAP3_ISP_SBL_CSI2A_WRITE | \
1121 				 OMAP3_ISP_SBL_CSI2C_WRITE | \
1122 				 OMAP3_ISP_SBL_CCDC_WRITE | \
1123 				 OMAP3_ISP_SBL_PREVIEW_WRITE)
1124 
1125 void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res)
1126 {
1127 	u32 sbl = 0;
1128 
1129 	isp->sbl_resources |= res;
1130 
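	/*
	 * Recompute the shared buffer logic enable bits from the complete set
	 * of active resources, not just the resource being enabled here.
	 */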
1131 	if (isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ)
1132 		sbl |= ISPCTRL_SBL_SHARED_RPORTA;
1133 
1134 	if (isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ)
1135 		sbl |= ISPCTRL_SBL_SHARED_RPORTB;
1136 
1137 	if (isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE)
1138 		sbl |= ISPCTRL_SBL_SHARED_WPORTC;
1139 
1140 	if (isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE)
1141 		sbl |= ISPCTRL_SBL_WR0_RAM_EN;
1142 
1143 	if (isp->sbl_resources & OMAP3_ISP_SBL_WRITE)
1144 		sbl |= ISPCTRL_SBL_WR1_RAM_EN;
1145 
1146 	if (isp->sbl_resources & OMAP3_ISP_SBL_READ)
1147 		sbl |= ISPCTRL_SBL_RD_RAM_EN;
1148 
1149 	isp_reg_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
1150 }
1151 
1152 void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res)
1153 {
1154 	u32 sbl = 0;
1155 
1156 	isp->sbl_resources &= ~res;
1157 
1158 	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ))
1159 		sbl |= ISPCTRL_SBL_SHARED_RPORTA;
1160 
1161 	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ))
1162 		sbl |= ISPCTRL_SBL_SHARED_RPORTB;
1163 
1164 	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE))
1165 		sbl |= ISPCTRL_SBL_SHARED_WPORTC;
1166 
1167 	if (!(isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE))
1168 		sbl |= ISPCTRL_SBL_WR0_RAM_EN;
1169 
1170 	if (!(isp->sbl_resources & OMAP3_ISP_SBL_WRITE))
1171 		sbl |= ISPCTRL_SBL_WR1_RAM_EN;
1172 
1173 	if (!(isp->sbl_resources & OMAP3_ISP_SBL_READ))
1174 		sbl |= ISPCTRL_SBL_RD_RAM_EN;
1175 
1176 	isp_reg_clr(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
1177 }
1178 
1179 /*
1180  * omap3isp_module_sync_idle - Helper to sync a module with its idle state
1181  * @me: ISP submodule's media entity
1182  * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
1183  * @stopping: flag which tells whether the module wants to stop
1184  *
1185  * This function checks whether the ISP submodule needs to wait for the next
1186  * interrupt. If so, it makes the caller sleep while waiting for that event.
1187  */
1188 int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
1189 			      atomic_t *stopping)
1190 {
1191 	struct isp_pipeline *pipe = to_isp_pipeline(me);
1192 
1193 	if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED ||
1194 	    (pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT &&
1195 	     !isp_pipeline_ready(pipe)))
1196 		return 0;
1197 
1198 	/*
1199 	 * atomic_set() doesn't imply a memory barrier on ARM in SMP
1200 	 * configurations, so follow it with smp_mb() to avoid race conditions.
1201 	 */
1202 	atomic_set(stopping, 1);
1203 	smp_mb();
1204 
1205 	/*
1206 	 * If the module is the last one, it's writing to memory. In this case,
1207 	 * it's necessary to check whether the module is already paused due to
1208 	 * a DMA queue underrun, or whether it has to wait for the next
1209 	 * interrupt to become idle.
1210 	 * If it isn't the last one, the function won't sleep, but *stopping
1211 	 * will still be set so that the next submodule's interrupt handler
1212 	 * knows this module wants to go idle.
1213 	 */
1214 	if (isp_pipeline_is_last(me)) {
1215 		struct isp_video *video = pipe->output;
1216 		unsigned long flags;
1217 		spin_lock_irqsave(&video->irqlock, flags);
1218 		if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
1219 			spin_unlock_irqrestore(&video->irqlock, flags);
1220 			atomic_set(stopping, 0);
1221 			smp_mb();
1222 			return 0;
1223 		}
1224 		spin_unlock_irqrestore(&video->irqlock, flags);
1225 		if (!wait_event_timeout(*wait, !atomic_read(stopping),
1226 					msecs_to_jiffies(1000))) {
1227 			atomic_set(stopping, 0);
1228 			smp_mb();
1229 			return -ETIMEDOUT;
1230 		}
1231 	}
1232 
1233 	return 0;
1234 }
1235 
1236 /*
1237  * omap3isp_module_sync_is_stopping - Helper to verify if module was stopping
1238  * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
1239  * @stopping: flag which tells whether the module wants to stop
1240  *
1241  * This function checks whether the ISP submodule was stopping. If so, it
1242  * notifies the caller by setting stopping to 0 and waking up the wait queue.
1243  * Returns 1 if it was stopping, or 0 otherwise.
1244  */
1245 int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait,
1246 				     atomic_t *stopping)
1247 {
1248 	if (atomic_cmpxchg(stopping, 1, 0)) {
1249 		wake_up(wait);
1250 		return 1;
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 /* --------------------------------------------------------------------------
1257  * Clock management
1258  */
1259 
1260 #define ISPCTRL_CLKS_MASK	(ISPCTRL_H3A_CLK_EN | \
1261 				 ISPCTRL_HIST_CLK_EN | \
1262 				 ISPCTRL_RSZ_CLK_EN | \
1263 				 (ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN) | \
1264 				 (ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN))
1265 
1266 static void __isp_subclk_update(struct isp_device *isp)
1267 {
1268 	u32 clk = 0;
1269 
1270 	/* AEWB and AF share the same clock. */
1271 	if (isp->subclk_resources &
1272 	    (OMAP3_ISP_SUBCLK_AEWB | OMAP3_ISP_SUBCLK_AF))
1273 		clk |= ISPCTRL_H3A_CLK_EN;
1274 
1275 	if (isp->subclk_resources & OMAP3_ISP_SUBCLK_HIST)
1276 		clk |= ISPCTRL_HIST_CLK_EN;
1277 
1278 	if (isp->subclk_resources & OMAP3_ISP_SUBCLK_RESIZER)
1279 		clk |= ISPCTRL_RSZ_CLK_EN;
1280 
1281 	/* NOTE: For CCDC & Preview submodules, we need to affect internal
1282 	 *       RAM as well.
1283 	 */
1284 	if (isp->subclk_resources & OMAP3_ISP_SUBCLK_CCDC)
1285 		clk |= ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN;
1286 
1287 	if (isp->subclk_resources & OMAP3_ISP_SUBCLK_PREVIEW)
1288 		clk |= ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN;
1289 
1290 	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL,
1291 			ISPCTRL_CLKS_MASK, clk);
1292 }
1293 
1294 void omap3isp_subclk_enable(struct isp_device *isp,
1295 			    enum isp_subclk_resource res)
1296 {
1297 	isp->subclk_resources |= res;
1298 
1299 	__isp_subclk_update(isp);
1300 }
1301 
1302 void omap3isp_subclk_disable(struct isp_device *isp,
1303 			     enum isp_subclk_resource res)
1304 {
1305 	isp->subclk_resources &= ~res;
1306 
1307 	__isp_subclk_update(isp);
1308 }
1309 
1310 /*
1311  * isp_enable_clocks - Enable ISP clocks
1312  * @isp: OMAP3 ISP device
1313  *
1314  * Return 0 if successful, or clk_prepare_enable return value if any of them
1315  * fails.
1316  */
1317 static int isp_enable_clocks(struct isp_device *isp)
1318 {
1319 	int r;
1320 	unsigned long rate;
1321 
1322 	r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
1323 	if (r) {
1324 		dev_err(isp->dev, "failed to enable cam_ick clock\n");
1325 		goto out_clk_enable_ick;
1326 	}
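	/* Set the cam_mclk rate before enabling the clock; the achieved rate is checked below. */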
1327 	r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
1328 	if (r) {
1329 		dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
1330 		goto out_clk_enable_mclk;
1331 	}
1332 	r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
1333 	if (r) {
1334 		dev_err(isp->dev, "failed to enable cam_mclk clock\n");
1335 		goto out_clk_enable_mclk;
1336 	}
1337 	rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
1338 	if (rate != CM_CAM_MCLK_HZ)
1339 		dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
1340 				   " expected : %d\n"
1341 				   " actual   : %ld\n", CM_CAM_MCLK_HZ, rate);
1342 	r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
1343 	if (r) {
1344 		dev_err(isp->dev, "failed to enable csi2_fck clock\n");
1345 		goto out_clk_enable_csi2_fclk;
1346 	}
1347 	return 0;
1348 
1349 out_clk_enable_csi2_fclk:
1350 	clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
1351 out_clk_enable_mclk:
1352 	clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
1353 out_clk_enable_ick:
1354 	return r;
1355 }
1356 
1357 /*
1358  * isp_disable_clocks - Disable ISP clocks
1359  * @isp: OMAP3 ISP device
1360  */
1361 static void isp_disable_clocks(struct isp_device *isp)
1362 {
1363 	clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
1364 	clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
1365 	clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
1366 }
1367 
1368 static const char *isp_clocks[] = {
1369 	"cam_ick",
1370 	"cam_mclk",
1371 	"csi2_96m_fck",
1372 	"l3_ick",
1373 };
1374 
1375 static int isp_get_clocks(struct isp_device *isp)
1376 {
1377 	struct clk *clk;
1378 	unsigned int i;
1379 
1380 	for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
1381 		clk = devm_clk_get(isp->dev, isp_clocks[i]);
1382 		if (IS_ERR(clk)) {
1383 			dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
1384 			return PTR_ERR(clk);
1385 		}
1386 
1387 		isp->clock[i] = clk;
1388 	}
1389 
1390 	return 0;
1391 }
1392 
1393 /*
1394  * omap3isp_get - Acquire the ISP resource.
1395  *
1396  * Initializes the clocks for the first acquire.
1397  *
1398  * Increment the reference count on the ISP. If the first reference is taken,
1399  * enable clocks and power-up all submodules.
1400  *
1401  * Return a pointer to the ISP device structure, or NULL if an error occurred.
1402  */
1403 static struct isp_device *__omap3isp_get(struct isp_device *isp, bool irq)
1404 {
1405 	struct isp_device *__isp = isp;
1406 
1407 	if (isp == NULL)
1408 		return NULL;
1409 
1410 	mutex_lock(&isp->isp_mutex);
1411 	if (isp->ref_count > 0)
1412 		goto out;
1413 
1414 	if (isp_enable_clocks(isp) < 0) {
1415 		__isp = NULL;
1416 		goto out;
1417 	}
1418 
1419 	/* We don't want to restore context before saving it! */
1420 	if (isp->has_context)
1421 		isp_restore_ctx(isp);
1422 
1423 	if (irq)
1424 		isp_enable_interrupts(isp);
1425 
1426 out:
1427 	if (__isp != NULL)
1428 		isp->ref_count++;
1429 	mutex_unlock(&isp->isp_mutex);
1430 
1431 	return __isp;
1432 }
1433 
1434 struct isp_device *omap3isp_get(struct isp_device *isp)
1435 {
1436 	return __omap3isp_get(isp, true);
1437 }
1438 
1439 /*
1440  * omap3isp_put - Release the ISP
1441  *
1442  * Decrement the reference count on the ISP. If the last reference is released,
1443  * power-down all submodules, disable clocks and free temporary buffers.
1444  */
1445 static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
1446 {
1447 	if (isp == NULL)
1448 		return;
1449 
1450 	mutex_lock(&isp->isp_mutex);
1451 	BUG_ON(isp->ref_count == 0);
1452 	if (--isp->ref_count == 0) {
1453 		isp_disable_interrupts(isp);
1454 		if (save_ctx) {
1455 			isp_save_ctx(isp);
1456 			isp->has_context = 1;
1457 		}
1458 		/* Reset the ISP if an entity has failed to stop. This is the
1459 		 * only way to recover from such conditions.
1460 		 */
1461 		if (!media_entity_enum_empty(&isp->crashed) ||
1462 		    isp->stop_failure)
1463 			isp_reset(isp);
1464 		isp_disable_clocks(isp);
1465 	}
1466 	mutex_unlock(&isp->isp_mutex);
1467 }
1468 
1469 void omap3isp_put(struct isp_device *isp)
1470 {
1471 	__omap3isp_put(isp, true);
1472 }
1473 
1474 /* --------------------------------------------------------------------------
1475  * Platform device driver
1476  */
1477 
1478 #ifdef CONFIG_PM
1479 
1480 /*
1481  * Power management support.
1482  *
1483  * As the ISP can't properly handle an input video stream interruption on a non
1484  * frame boundary, the ISP pipelines need to be stopped before sensors get
1485  * suspended. However, as suspending the sensors can require a running clock,
1486  * which can be provided by the ISP, the ISP can't be completely suspended
1487  * before the sensor.
1488  *
1489  * To solve this problem, power management support is split into prepare/complete
1490  * and suspend/resume operations. The pipelines are stopped in prepare() and the
1491  * ISP clocks get disabled in suspend(). Similarly, the clocks are re-enabled in
1492  * resume(), and the pipelines are restarted in complete().
1493  *
1494  * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
1495  * yet.
1496  */
1497 static int isp_pm_prepare(struct device *dev)
1498 {
1499 	struct isp_device *isp = dev_get_drvdata(dev);
1500 	int reset;
1501 
1502 	WARN_ON(mutex_is_locked(&isp->isp_mutex));
1503 
1504 	if (isp->ref_count == 0)
1505 		return 0;
1506 
1507 	reset = isp_suspend_modules(isp);
1508 	isp_disable_interrupts(isp);
1509 	isp_save_ctx(isp);
1510 	if (reset)
1511 		isp_reset(isp);
1512 
1513 	return 0;
1514 }
1515 
1516 static int isp_pm_suspend(struct device *dev)
1517 {
1518 	struct isp_device *isp = dev_get_drvdata(dev);
1519 
1520 	WARN_ON(mutex_is_locked(&isp->isp_mutex));
1521 
1522 	if (isp->ref_count)
1523 		isp_disable_clocks(isp);
1524 
1525 	return 0;
1526 }
1527 
1528 static int isp_pm_resume(struct device *dev)
1529 {
1530 	struct isp_device *isp = dev_get_drvdata(dev);
1531 
1532 	if (isp->ref_count == 0)
1533 		return 0;
1534 
1535 	return isp_enable_clocks(isp);
1536 }
1537 
1538 static void isp_pm_complete(struct device *dev)
1539 {
1540 	struct isp_device *isp = dev_get_drvdata(dev);
1541 
1542 	if (isp->ref_count == 0)
1543 		return;
1544 
1545 	isp_restore_ctx(isp);
1546 	isp_enable_interrupts(isp);
1547 	isp_resume_modules(isp);
1548 }
1549 
1550 #else
1551 
1552 #define isp_pm_prepare	NULL
1553 #define isp_pm_suspend	NULL
1554 #define isp_pm_resume	NULL
1555 #define isp_pm_complete	NULL
1556 
1557 #endif /* CONFIG_PM */
1558 
1559 static void isp_unregister_entities(struct isp_device *isp)
1560 {
1561 	media_device_unregister(&isp->media_dev);
1562 
1563 	omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
1564 	omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
1565 	omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
1566 	omap3isp_preview_unregister_entities(&isp->isp_prev);
1567 	omap3isp_resizer_unregister_entities(&isp->isp_res);
1568 	omap3isp_stat_unregister_entities(&isp->isp_aewb);
1569 	omap3isp_stat_unregister_entities(&isp->isp_af);
1570 	omap3isp_stat_unregister_entities(&isp->isp_hist);
1571 
1572 	v4l2_device_unregister(&isp->v4l2_dev);
1573 	media_device_cleanup(&isp->media_dev);
1574 }
1575 
1576 static int isp_link_entity(
1577 	struct isp_device *isp, struct media_entity *entity,
1578 	enum isp_interface_type interface)
1579 {
1580 	struct media_entity *input;
1581 	unsigned int flags;
1582 	unsigned int pad;
1583 	unsigned int i;
1584 
1585 	/* Connect the sensor to the correct interface module.
1586 	 * Parallel sensors are connected directly to the CCDC, while
1587 	 * serial sensors are connected to the CSI2a, CCP2b or CSI2c
1588 	 * receiver through CSIPHY1 or CSIPHY2.
1589 	 */
1590 	switch (interface) {
1591 	case ISP_INTERFACE_PARALLEL:
1592 		input = &isp->isp_ccdc.subdev.entity;
1593 		pad = CCDC_PAD_SINK;
1594 		flags = 0;
1595 		break;
1596 
1597 	case ISP_INTERFACE_CSI2A_PHY2:
1598 		input = &isp->isp_csi2a.subdev.entity;
1599 		pad = CSI2_PAD_SINK;
1600 		flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
1601 		break;
1602 
1603 	case ISP_INTERFACE_CCP2B_PHY1:
1604 	case ISP_INTERFACE_CCP2B_PHY2:
1605 		input = &isp->isp_ccp2.subdev.entity;
1606 		pad = CCP2_PAD_SINK;
1607 		flags = 0;
1608 		break;
1609 
1610 	case ISP_INTERFACE_CSI2C_PHY1:
1611 		input = &isp->isp_csi2c.subdev.entity;
1612 		pad = CSI2_PAD_SINK;
1613 		flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
1614 		break;
1615 
1616 	default:
1617 		dev_err(isp->dev, "%s: invalid interface type %u\n", __func__,
1618 			interface);
1619 		return -EINVAL;
1620 	}
1621 
1622 	/*
1623 	 * Not all interfaces are available on all revisions of the
1624 	 * ISP. The sub-devices of those interfaces aren't initialised
1625 	 * in such a case. Check this by ensuring the num_pads is
1626 	 * non-zero.
1627 	 */
1628 	if (!input->num_pads) {
1629 		dev_err(isp->dev, "%s: invalid input %u\n", entity->name,
1630 			interface);
1631 		return -EINVAL;
1632 	}
1633 
1634 	for (i = 0; i < entity->num_pads; i++) {
1635 		if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
1636 			break;
1637 	}
1638 	if (i == entity->num_pads) {
1639 		dev_err(isp->dev, "%s: no source pad in external entity %s\n",
1640 			__func__, entity->name);
1641 		return -EINVAL;
1642 	}
1643 
1644 	return media_create_pad_link(entity, i, input, pad, flags);
1645 }
1646 
1647 static int isp_register_entities(struct isp_device *isp)
1648 {
1649 	int ret;
1650 
1651 	isp->media_dev.dev = isp->dev;
1652 	strscpy(isp->media_dev.model, "TI OMAP3 ISP",
1653 		sizeof(isp->media_dev.model));
1654 	isp->media_dev.hw_revision = isp->revision;
1655 	isp->media_dev.ops = &isp_media_ops;
1656 	media_device_init(&isp->media_dev);
1657 
1658 	isp->v4l2_dev.mdev = &isp->media_dev;
1659 	ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
1660 	if (ret < 0) {
1661 		dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
1662 			__func__, ret);
1663 		goto done;
1664 	}
1665 
1666 	/* Register internal entities */
1667 	ret = omap3isp_ccp2_register_entities(&isp->isp_ccp2, &isp->v4l2_dev);
1668 	if (ret < 0)
1669 		goto done;
1670 
1671 	ret = omap3isp_csi2_register_entities(&isp->isp_csi2a, &isp->v4l2_dev);
1672 	if (ret < 0)
1673 		goto done;
1674 
1675 	ret = omap3isp_ccdc_register_entities(&isp->isp_ccdc, &isp->v4l2_dev);
1676 	if (ret < 0)
1677 		goto done;
1678 
1679 	ret = omap3isp_preview_register_entities(&isp->isp_prev,
1680 						 &isp->v4l2_dev);
1681 	if (ret < 0)
1682 		goto done;
1683 
1684 	ret = omap3isp_resizer_register_entities(&isp->isp_res, &isp->v4l2_dev);
1685 	if (ret < 0)
1686 		goto done;
1687 
1688 	ret = omap3isp_stat_register_entities(&isp->isp_aewb, &isp->v4l2_dev);
1689 	if (ret < 0)
1690 		goto done;
1691 
1692 	ret = omap3isp_stat_register_entities(&isp->isp_af, &isp->v4l2_dev);
1693 	if (ret < 0)
1694 		goto done;
1695 
1696 	ret = omap3isp_stat_register_entities(&isp->isp_hist, &isp->v4l2_dev);
1697 	if (ret < 0)
1698 		goto done;
1699 
1700 done:
1701 	if (ret < 0)
1702 		isp_unregister_entities(isp);
1703 
1704 	return ret;
1705 }
1706 
1707 /*
1708  * isp_create_links() - Create links for internal and external ISP entities
1709  * @isp : Pointer to ISP device
1710  *
1711  * This function creates all links between ISP internal and external entities.
1712  *
1713  * Return: A negative error code on failure or zero on success. Possible error
1714  * codes are those returned by media_create_pad_link().
1715  */
1716 static int isp_create_links(struct isp_device *isp)
1717 {
1718 	int ret;
1719 
1720 	/* Create links between entities and video nodes. */
1721 	ret = media_create_pad_link(
1722 			&isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
1723 			&isp->isp_csi2a.video_out.video.entity, 0, 0);
1724 	if (ret < 0)
1725 		return ret;
1726 
1727 	ret = media_create_pad_link(
1728 			&isp->isp_ccp2.video_in.video.entity, 0,
1729 			&isp->isp_ccp2.subdev.entity, CCP2_PAD_SINK, 0);
1730 	if (ret < 0)
1731 		return ret;
1732 
1733 	ret = media_create_pad_link(
1734 			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
1735 			&isp->isp_ccdc.video_out.video.entity, 0, 0);
1736 	if (ret < 0)
1737 		return ret;
1738 
1739 	ret = media_create_pad_link(
1740 			&isp->isp_prev.video_in.video.entity, 0,
1741 			&isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
1742 	if (ret < 0)
1743 		return ret;
1744 
1745 	ret = media_create_pad_link(
1746 			&isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
1747 			&isp->isp_prev.video_out.video.entity, 0, 0);
1748 	if (ret < 0)
1749 		return ret;
1750 
1751 	ret = media_create_pad_link(
1752 			&isp->isp_res.video_in.video.entity, 0,
1753 			&isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
1754 	if (ret < 0)
1755 		return ret;
1756 
1757 	ret = media_create_pad_link(
1758 			&isp->isp_res.subdev.entity, RESZ_PAD_SOURCE,
1759 			&isp->isp_res.video_out.video.entity, 0, 0);
1761 	if (ret < 0)
1762 		return ret;
1763 
1764 	/* Create links between entities. */
1765 	ret = media_create_pad_link(
1766 			&isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE,
1767 			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
1768 	if (ret < 0)
1769 		return ret;
1770 
1771 	ret = media_create_pad_link(
1772 			&isp->isp_ccp2.subdev.entity, CCP2_PAD_SOURCE,
1773 			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0);
1774 	if (ret < 0)
1775 		return ret;
1776 
1777 	ret = media_create_pad_link(
1778 			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
1779 			&isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0);
1780 	if (ret < 0)
1781 		return ret;
1782 
1783 	ret = media_create_pad_link(
1784 			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF,
1785 			&isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
1786 	if (ret < 0)
1787 		return ret;
1788 
1789 	ret = media_create_pad_link(
1790 			&isp->isp_prev.subdev.entity, PREV_PAD_SOURCE,
1791 			&isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0);
1792 	if (ret < 0)
1793 		return ret;
1794 
1795 	ret = media_create_pad_link(
1796 			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
1797 			&isp->isp_aewb.subdev.entity, 0,
1798 			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1799 	if (ret < 0)
1800 		return ret;
1801 
1802 	ret = media_create_pad_link(
1803 			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
1804 			&isp->isp_af.subdev.entity, 0,
1805 			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1806 	if (ret < 0)
1807 		return ret;
1808 
1809 	ret = media_create_pad_link(
1810 			&isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP,
1811 			&isp->isp_hist.subdev.entity, 0,
1812 			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1813 	if (ret < 0)
1814 		return ret;
1815 
1816 	return 0;
1817 }
1818 
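/*
 * isp_cleanup_modules - Clean up every ISP sub-module initialised by
 * isp_initialize_modules().
 */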
1819 static void isp_cleanup_modules(struct isp_device *isp)
1820 {
1821 	omap3isp_h3a_aewb_cleanup(isp);
1822 	omap3isp_h3a_af_cleanup(isp);
1823 	omap3isp_hist_cleanup(isp);
1824 	omap3isp_resizer_cleanup(isp);
1825 	omap3isp_preview_cleanup(isp);
1826 	omap3isp_ccdc_cleanup(isp);
1827 	omap3isp_ccp2_cleanup(isp);
1828 	omap3isp_csi2_cleanup(isp);
1829 	omap3isp_csiphy_cleanup(isp);
1830 }
1831 
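/*
 * isp_initialize_modules - Initialise all ISP sub-modules
 * @isp: Pointer to ISP device
 *
 * Initialise the CSI PHYs, CSI2, CCP2, CCDC, preview engine, resizer,
 * histogram and H3A (AEWB and AF) modules. If any step fails, the modules
 * initialised so far are cleaned up in reverse order before returning the
 * error code.
 */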
1832 static int isp_initialize_modules(struct isp_device *isp)
1833 {
1834 	int ret;
1835 
1836 	ret = omap3isp_csiphy_init(isp);
1837 	if (ret < 0) {
1838 		dev_err(isp->dev, "CSI PHY initialization failed\n");
1839 		return ret;
1840 	}
1841 
1842 	ret = omap3isp_csi2_init(isp);
1843 	if (ret < 0) {
1844 		dev_err(isp->dev, "CSI2 initialization failed\n");
1845 		goto error_csi2;
1846 	}
1847 
1848 	ret = omap3isp_ccp2_init(isp);
1849 	if (ret < 0) {
1850 		dev_err_probe(isp->dev, ret, "CCP2 initialization failed\n");
1851 		goto error_ccp2;
1852 	}
1853 
1854 	ret = omap3isp_ccdc_init(isp);
1855 	if (ret < 0) {
1856 		dev_err(isp->dev, "CCDC initialization failed\n");
1857 		goto error_ccdc;
1858 	}
1859 
1860 	ret = omap3isp_preview_init(isp);
1861 	if (ret < 0) {
1862 		dev_err(isp->dev, "Preview initialization failed\n");
1863 		goto error_preview;
1864 	}
1865 
1866 	ret = omap3isp_resizer_init(isp);
1867 	if (ret < 0) {
1868 		dev_err(isp->dev, "Resizer initialization failed\n");
1869 		goto error_resizer;
1870 	}
1871 
1872 	ret = omap3isp_hist_init(isp);
1873 	if (ret < 0) {
1874 		dev_err(isp->dev, "Histogram initialization failed\n");
1875 		goto error_hist;
1876 	}
1877 
1878 	ret = omap3isp_h3a_aewb_init(isp);
1879 	if (ret < 0) {
1880 		dev_err(isp->dev, "H3A AEWB initialization failed\n");
1881 		goto error_h3a_aewb;
1882 	}
1883 
1884 	ret = omap3isp_h3a_af_init(isp);
1885 	if (ret < 0) {
1886 		dev_err(isp->dev, "H3A AF initialization failed\n");
1887 		goto error_h3a_af;
1888 	}
1889 
1890 	return 0;
1891 
1892 error_h3a_af:
1893 	omap3isp_h3a_aewb_cleanup(isp);
1894 error_h3a_aewb:
1895 	omap3isp_hist_cleanup(isp);
1896 error_hist:
1897 	omap3isp_resizer_cleanup(isp);
1898 error_resizer:
1899 	omap3isp_preview_cleanup(isp);
1900 error_preview:
1901 	omap3isp_ccdc_cleanup(isp);
1902 error_ccdc:
1903 	omap3isp_ccp2_cleanup(isp);
1904 error_ccp2:
1905 	omap3isp_csi2_cleanup(isp);
1906 error_csi2:
1907 	omap3isp_csiphy_cleanup(isp);
1908 
1909 	return ret;
1910 }
1911 
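/*
 * isp_detach_iommu - Detach the ISP from its ARM IOMMU mapping and release
 * the mapping created by isp_attach_iommu(). A no-op when
 * CONFIG_ARM_DMA_USE_IOMMU is disabled.
 */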
1912 static void isp_detach_iommu(struct isp_device *isp)
1913 {
1914 #ifdef CONFIG_ARM_DMA_USE_IOMMU
1915 	arm_iommu_detach_device(isp->dev);
1916 	arm_iommu_release_mapping(isp->mapping);
1917 	isp->mapping = NULL;
1918 #endif
1919 }
1920 
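/*
 * isp_attach_iommu - Create and attach an ARM IOMMU mapping for the ISP
 * @isp: Pointer to ISP device
 *
 * Replace any default mapping installed by the architecture code with a
 * dedicated mapping (base SZ_1G, size SZ_2G) and attach the device to it.
 * Returns 0 on success, a negative error code on failure, or -ENODEV when
 * CONFIG_ARM_DMA_USE_IOMMU is disabled.
 */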
1921 static int isp_attach_iommu(struct isp_device *isp)
1922 {
1923 #ifdef CONFIG_ARM_DMA_USE_IOMMU
1924 	struct dma_iommu_mapping *mapping;
1925 	int ret;
1926 
1927 	/* We always want to replace any default mapping from the arch code */
1928 	mapping = to_dma_iommu_mapping(isp->dev);
1929 	if (mapping) {
1930 		arm_iommu_detach_device(isp->dev);
1931 		arm_iommu_release_mapping(mapping);
1932 	}
1933 
1934 	/*
1935 	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
1936 	 * VAs. This will allocate a corresponding IOMMU domain.
1937 	 */
1938 	mapping = arm_iommu_create_mapping(isp->dev, SZ_1G, SZ_2G);
1939 	if (IS_ERR(mapping)) {
1940 		dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
1941 		return PTR_ERR(mapping);
1942 	}
1943 
1944 	isp->mapping = mapping;
1945 
1946 	/* Attach the ARM VA mapping to the device. */
1947 	ret = arm_iommu_attach_device(isp->dev, mapping);
1948 	if (ret < 0) {
1949 		dev_err(isp->dev, "failed to attach device to VA mapping\n");
1950 		goto error;
1951 	}
1952 
1953 	return 0;
1954 
1955 error:
1956 	arm_iommu_release_mapping(isp->mapping);
1957 	isp->mapping = NULL;
1958 	return ret;
1959 #else
1960 	return -ENODEV;
1961 #endif
1962 }
1963 
1964 /*
1965  * isp_remove - Remove ISP platform device
1966  * @pdev: Pointer to ISP platform device
1967  *
1968  * Unregister entities, clean up modules and release the IOMMU mapping.
1969  */
1970 static void isp_remove(struct platform_device *pdev)
1971 {
1972 	struct isp_device *isp = platform_get_drvdata(pdev);
1973 
1974 	v4l2_async_nf_unregister(&isp->notifier);
1975 	v4l2_async_nf_cleanup(&isp->notifier);
1976 	isp_unregister_entities(isp);
1977 	isp_cleanup_modules(isp);
1978 	isp_xclk_cleanup(isp);
1979 
1980 	__omap3isp_get(isp, false);
1981 	isp_detach_iommu(isp);
1982 	__omap3isp_put(isp, false);
1983 
1984 	media_entity_enum_cleanup(&isp->crashed);
1985 
1986 	kfree(isp);
1987 }
1988 
1989 enum isp_of_phy {
1990 	ISP_OF_PHY_PARALLEL = 0,
1991 	ISP_OF_PHY_CSIPHY1,
1992 	ISP_OF_PHY_CSIPHY2,
1993 };
1994 
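/*
 * isp_subdev_notifier_bound - Link a newly bound external sub-device
 *
 * Called by the V4L2 async framework when a sub-device is bound. Create the
 * media link from the sub-device's source pad to the ISP input selected by
 * the bus configuration parsed from its endpoint, with the media graph mutex
 * held.
 */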
1995 static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
1996 				     struct v4l2_subdev *sd,
1997 				     struct v4l2_async_connection *asc)
1998 {
1999 	struct isp_device *isp = container_of(async, struct isp_device,
2000 					      notifier);
2001 	struct isp_bus_cfg *bus_cfg =
2002 		&container_of(asc, struct isp_async_subdev, asd)->bus;
2003 	int ret;
2004 
2005 	mutex_lock(&isp->media_dev.graph_mutex);
2006 	ret = isp_link_entity(isp, &sd->entity, bus_cfg->interface);
2007 	mutex_unlock(&isp->media_dev.graph_mutex);
2008 
2009 	return ret;
2010 }
2011 
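/*
 * isp_subdev_notifier_complete - Finish registration once all sub-devices are
 * bound: initialise the crashed-entity enumeration, create the sub-device
 * nodes and register the media device.
 */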
2012 static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
2013 {
2014 	struct isp_device *isp = container_of(async, struct isp_device,
2015 					      notifier);
2016 	int ret;
2017 
2018 	mutex_lock(&isp->media_dev.graph_mutex);
2019 	ret = media_entity_enum_init(&isp->crashed, &isp->media_dev);
2020 	mutex_unlock(&isp->media_dev.graph_mutex);
2021 	if (ret)
2022 		return ret;
2023 
2024 	ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
2025 	if (ret < 0)
2026 		return ret;
2027 
2028 	return media_device_register(&isp->media_dev);
2029 }
2030 
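/*
 * isp_parse_of_parallel_endpoint - Fill an isp_bus_cfg from a parallel (or
 * BT.656) fwnode endpoint: data lane shift, clock/sync/field/data polarities
 * and the BT.656 flag.
 */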
2031 static void isp_parse_of_parallel_endpoint(struct device *dev,
2032 					   struct v4l2_fwnode_endpoint *vep,
2033 					   struct isp_bus_cfg *buscfg)
2034 {
2035 	buscfg->interface = ISP_INTERFACE_PARALLEL;
2036 	buscfg->bus.parallel.data_lane_shift = vep->bus.parallel.data_shift;
2037 	buscfg->bus.parallel.clk_pol =
2038 		!!(vep->bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING);
2039 	buscfg->bus.parallel.hs_pol =
2040 		!!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
2041 	buscfg->bus.parallel.vs_pol =
2042 		!!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
2043 	buscfg->bus.parallel.fld_pol =
2044 		!!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
2045 	buscfg->bus.parallel.data_pol =
2046 		!!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
2047 	buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656;
2048 }
2049 
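/*
 * isp_parse_of_csi2_endpoint - Fill an isp_bus_cfg from a CSI-2 D-PHY fwnode
 * endpoint: clock and data lane positions and polarities. The CRC flag is
 * currently hard-coded to 1 (see the FIXME below).
 */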
2050 static void isp_parse_of_csi2_endpoint(struct device *dev,
2051 				       struct v4l2_fwnode_endpoint *vep,
2052 				       struct isp_bus_cfg *buscfg)
2053 {
2054 	unsigned int i;
2055 
2056 	buscfg->bus.csi2.lanecfg.clk.pos = vep->bus.mipi_csi2.clock_lane;
2057 	buscfg->bus.csi2.lanecfg.clk.pol =
2058 		vep->bus.mipi_csi2.lane_polarities[0];
2059 	dev_dbg(dev, "clock lane polarity %u, pos %u\n",
2060 		buscfg->bus.csi2.lanecfg.clk.pol,
2061 		buscfg->bus.csi2.lanecfg.clk.pos);
2062 
2063 	buscfg->bus.csi2.num_data_lanes = vep->bus.mipi_csi2.num_data_lanes;
2064 
2065 	for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
2066 		buscfg->bus.csi2.lanecfg.data[i].pos =
2067 			vep->bus.mipi_csi2.data_lanes[i];
2068 		buscfg->bus.csi2.lanecfg.data[i].pol =
2069 			vep->bus.mipi_csi2.lane_polarities[i + 1];
2070 		dev_dbg(dev,
2071 			"data lane %u polarity %u, pos %u\n", i,
2072 			buscfg->bus.csi2.lanecfg.data[i].pol,
2073 			buscfg->bus.csi2.lanecfg.data[i].pos);
2074 	}
2075 	/*
2076 	 * FIXME: now we assume the CRC is always there. Implement a way to
2077 	 * obtain this information from the sensor. Frame descriptors, perhaps?
2078 	 */
2079 	buscfg->bus.csi2.crc = 1;
2080 }
2081 
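/*
 * isp_parse_of_csi1_endpoint - Fill an isp_bus_cfg from a CSI-1/CCP2 fwnode
 * endpoint: clock and data lane configuration, strobe/clock selection and
 * CCP2 mode.
 */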
2082 static void isp_parse_of_csi1_endpoint(struct device *dev,
2083 				       struct v4l2_fwnode_endpoint *vep,
2084 				       struct isp_bus_cfg *buscfg)
2085 {
2086 	buscfg->bus.ccp2.lanecfg.clk.pos = vep->bus.mipi_csi1.clock_lane;
2087 	buscfg->bus.ccp2.lanecfg.clk.pol = vep->bus.mipi_csi1.lane_polarity[0];
2088 	dev_dbg(dev, "clock lane polarity %u, pos %u\n",
2089 		buscfg->bus.ccp2.lanecfg.clk.pol,
2090 		buscfg->bus.ccp2.lanecfg.clk.pos);
2091 
2092 	buscfg->bus.ccp2.lanecfg.data[0].pos = vep->bus.mipi_csi1.data_lane;
2093 	buscfg->bus.ccp2.lanecfg.data[0].pol =
2094 		vep->bus.mipi_csi1.lane_polarity[1];
2095 
2096 	dev_dbg(dev, "data lane polarity %u, pos %u\n",
2097 		buscfg->bus.ccp2.lanecfg.data[0].pol,
2098 		buscfg->bus.ccp2.lanecfg.data[0].pos);
2099 
2100 	buscfg->bus.ccp2.strobe_clk_pol = vep->bus.mipi_csi1.clock_inv;
2101 	buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe;
2102 	buscfg->bus.ccp2.ccp2_mode = vep->bus_type == V4L2_MBUS_CCP2;
2103 	buscfg->bus.ccp2.vp_clk_pol = 1;
2104 
2105 	buscfg->bus.ccp2.crc = 1;
2106 }
2107 
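/*
 * Map each CSI PHY firmware graph port to the ISP interface used when its
 * endpoint is configured as CSI-2 or as CSI-1/CCP2 respectively.
 */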
2108 static struct {
2109 	u32 phy;
2110 	u32 csi2_if;
2111 	u32 csi1_if;
2112 } isp_bus_interfaces[2] = {
2113 	{ ISP_OF_PHY_CSIPHY1,
2114 	  ISP_INTERFACE_CSI2C_PHY1, ISP_INTERFACE_CCP2B_PHY1 },
2115 	{ ISP_OF_PHY_CSIPHY2,
2116 	  ISP_INTERFACE_CSI2A_PHY2, ISP_INTERFACE_CCP2B_PHY2 },
2117 };
2118 
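/*
 * isp_parse_of_endpoints - Parse all firmware-described external interfaces
 * @isp: Pointer to ISP device
 *
 * Port ISP_OF_PHY_PARALLEL is parsed as a parallel bus endpoint. For the two
 * CSI PHY ports the endpoint is first parsed as CSI-2 D-PHY and, on -ENXIO,
 * re-parsed as CSI-1 and finally as CCP2. Each successfully parsed endpoint
 * is added to the async notifier so the remote sub-device can be bound later.
 *
 * Purely illustrative endpoint fragment for the parallel port, assuming the
 * generic video-interfaces devicetree binding (property values and the
 * &sensor_out label are placeholders):
 *
 *	port@0 {
 *		endpoint {
 *			remote-endpoint = <&sensor_out>;
 *			data-shift = <2>;
 *			hsync-active = <0>;
 *			vsync-active = <0>;
 *		};
 *	};
 */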
2119 static int isp_parse_of_endpoints(struct isp_device *isp)
2120 {
2121 	struct fwnode_handle *ep;
2122 	struct isp_async_subdev *isd = NULL;
2123 	unsigned int i;
2124 
2125 	ep = fwnode_graph_get_endpoint_by_id(
2126 		dev_fwnode(isp->dev), ISP_OF_PHY_PARALLEL, 0,
2127 		FWNODE_GRAPH_ENDPOINT_NEXT);
2128 
2129 	if (ep) {
2130 		struct v4l2_fwnode_endpoint vep = {
2131 			.bus_type = V4L2_MBUS_PARALLEL
2132 		};
2133 		int ret;
2134 
2135 		dev_dbg(isp->dev, "parsing parallel interface\n");
2136 
2137 		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
2138 
2139 		if (!ret) {
2140 			isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier,
2141 							      ep, struct
2142 							      isp_async_subdev);
2143 			if (!IS_ERR(isd))
2144 				isp_parse_of_parallel_endpoint(isp->dev, &vep, &isd->bus);
2145 		}
2146 
2147 		fwnode_handle_put(ep);
2148 	}
2149 
2150 	for (i = 0; i < ARRAY_SIZE(isp_bus_interfaces); i++) {
2151 		struct v4l2_fwnode_endpoint vep = {
2152 			.bus_type = V4L2_MBUS_CSI2_DPHY
2153 		};
2154 		int ret;
2155 
2156 		ep = fwnode_graph_get_endpoint_by_id(
2157 			dev_fwnode(isp->dev), isp_bus_interfaces[i].phy, 0,
2158 			FWNODE_GRAPH_ENDPOINT_NEXT);
2159 
2160 		if (!ep)
2161 			continue;
2162 
2163 		dev_dbg(isp->dev, "parsing serial interface %u, node %pOF\n", i,
2164 			to_of_node(ep));
2165 
2166 		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
2167 		if (ret == -ENXIO) {
2168 			vep = (struct v4l2_fwnode_endpoint)
2169 				{ .bus_type = V4L2_MBUS_CSI1 };
2170 			ret = v4l2_fwnode_endpoint_parse(ep, &vep);
2171 
2172 			if (ret == -ENXIO) {
2173 				vep = (struct v4l2_fwnode_endpoint)
2174 					{ .bus_type = V4L2_MBUS_CCP2 };
2175 				ret = v4l2_fwnode_endpoint_parse(ep, &vep);
2176 			}
2177 		}
2178 
2179 		if (!ret) {
2180 			isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier,
2181 							      ep,
2182 							      struct
2183 							      isp_async_subdev);
2184 
2185 			if (!IS_ERR(isd)) {
2186 				switch (vep.bus_type) {
2187 				case V4L2_MBUS_CSI2_DPHY:
2188 					isd->bus.interface =
2189 						isp_bus_interfaces[i].csi2_if;
2190 					isp_parse_of_csi2_endpoint(isp->dev, &vep, &isd->bus);
2191 					break;
2192 				case V4L2_MBUS_CSI1:
2193 				case V4L2_MBUS_CCP2:
2194 					isd->bus.interface =
2195 						isp_bus_interfaces[i].csi1_if;
2196 					isp_parse_of_csi1_endpoint(isp->dev, &vep,
2197 								   &isd->bus);
2198 					break;
2199 				default:
2200 					break;
2201 				}
2202 			}
2203 		}
2204 
2205 		fwnode_handle_put(ep);
2206 	}
2207 
2208 	return 0;
2209 }
2210 
2211 static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
2212 	.bound = isp_subdev_notifier_bound,
2213 	.complete = isp_subdev_notifier_complete,
2214 };
2215 
2216 /*
2217  * isp_probe - Probe ISP platform device
2218  * @pdev: Pointer to ISP platform device
2219  *
2220  * Returns 0 if successful,
2221  *   -ENOMEM if no memory available,
2222  *   -ENODEV if no platform device resources found
2223  *     or no space for remapping registers,
2224  *   -EINVAL if the interrupt handler could not be installed,
2225  *   or a clk_get() error value.
2226  */
2227 static int isp_probe(struct platform_device *pdev)
2228 {
2229 	struct isp_device *isp;
2230 	struct resource *mem;
2231 	int ret;
2232 	int i, m;
2233 
2234 	isp = kzalloc(sizeof(*isp), GFP_KERNEL);
2235 	if (!isp) {
2236 		dev_err(&pdev->dev, "could not allocate memory\n");
2237 		return -ENOMEM;
2238 	}
2239 
2240 	ret = fwnode_property_read_u32(of_fwnode_handle(pdev->dev.of_node),
2241 				       "ti,phy-type", &isp->phy_type);
2242 	if (ret)
2243 		goto error_release_isp;
2244 
2245 	isp->syscon = syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
2246 							   "syscon", 1,
2247 							   &isp->syscon_offset);
2248 	if (IS_ERR(isp->syscon)) {
2249 		ret = PTR_ERR(isp->syscon);
2250 		goto error_release_isp;
2251 	}
2252 
2253 	isp->autoidle = autoidle;
2254 
2255 	mutex_init(&isp->isp_mutex);
2256 	spin_lock_init(&isp->stat_lock);
2257 	isp->dev = &pdev->dev;
2258 
2259 	isp->ref_count = 0;
2260 
2261 	ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
2262 	if (ret)
2263 		goto error;
2264 
2265 	platform_set_drvdata(pdev, isp);
2266 
2267 	/* Regulators */
2268 	isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
2269 	if (IS_ERR(isp->isp_csiphy1.vdd)) {
2270 		ret = PTR_ERR(isp->isp_csiphy1.vdd);
2271 		goto error;
2272 	}
2273 
2274 	isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
2275 	if (IS_ERR(isp->isp_csiphy2.vdd)) {
2276 		ret = PTR_ERR(isp->isp_csiphy2.vdd);
2277 		goto error;
2278 	}
2279 
2280 	/* Clocks
2281 	 *
2282 	 * The ISP clock tree is revision-dependent. We thus need to enable ICLK
2283 	 * manually to read the revision before calling __omap3isp_get().
2284 	 *
2285 	 * Start by mapping the ISP MMIO area, which is in two pieces.
2286 	 * The ISP IOMMU is in between. Map both now, and fill in the
2287 	 * ISP revision specific portions a little later in the
2288 	 * function.
2289 	 */
2290 	for (i = 0; i < 2; i++) {
2291 		unsigned int map_idx = i ? OMAP3_ISP_IOMEM_CSI2A_REGS1 : 0;
2292 
2293 		isp->mmio_base[map_idx] =
2294 			devm_platform_get_and_ioremap_resource(pdev, i, &mem);
2295 		if (IS_ERR(isp->mmio_base[map_idx])) {
2296 			ret = PTR_ERR(isp->mmio_base[map_idx]);
2297 			goto error;
2298 		}
2299 	}
2300 
2301 	ret = isp_get_clocks(isp);
2302 	if (ret < 0)
2303 		goto error;
2304 
2305 	ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
2306 	if (ret < 0)
2307 		goto error;
2308 
2309 	isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
2310 	dev_info(isp->dev, "Revision %d.%d found\n",
2311 		 (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);
2312 
2313 	clk_disable(isp->clock[ISP_CLK_CAM_ICK]);
2314 
2315 	if (__omap3isp_get(isp, false) == NULL) {
2316 		ret = -ENODEV;
2317 		goto error;
2318 	}
2319 
2320 	ret = isp_reset(isp);
2321 	if (ret < 0)
2322 		goto error_isp;
2323 
2324 	ret = isp_xclk_init(isp);
2325 	if (ret < 0)
2326 		goto error_isp;
2327 
2328 	/* Memory resources */
2329 	for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
2330 		if (isp->revision == isp_res_maps[m].isp_rev)
2331 			break;
2332 
2333 	if (m == ARRAY_SIZE(isp_res_maps)) {
2334 		dev_err(isp->dev, "No resource map found for ISP rev %d.%d\n",
2335 			(isp->revision & 0xf0) >> 4, isp->revision & 0xf);
2336 		ret = -ENODEV;
2337 		goto error_isp;
2338 	}
2339 
2340 	for (i = 1; i < OMAP3_ISP_IOMEM_CSI2A_REGS1; i++)
2341 		isp->mmio_base[i] =
2342 			isp->mmio_base[0] + isp_res_maps[m].offset[i];
2343 
2344 	for (i = OMAP3_ISP_IOMEM_CSIPHY2; i < OMAP3_ISP_IOMEM_LAST; i++)
2345 		isp->mmio_base[i] =
2346 			isp->mmio_base[OMAP3_ISP_IOMEM_CSI2A_REGS1]
2347 			+ isp_res_maps[m].offset[i];
2348 
2349 	isp->mmio_hist_base_phys =
2350 		mem->start + isp_res_maps[m].offset[OMAP3_ISP_IOMEM_HIST];
2351 
2352 	/* IOMMU */
2353 	ret = isp_attach_iommu(isp);
2354 	if (ret < 0) {
2355 		dev_err(&pdev->dev, "unable to attach to IOMMU\n");
2356 		goto error_isp;
2357 	}
2358 
2359 	/* Interrupt */
2360 	ret = platform_get_irq(pdev, 0);
2361 	if (ret < 0)
2362 		goto error_iommu;
2363 	isp->irq_num = ret;
2364 
2365 	if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
2366 			     "OMAP3 ISP", isp)) {
2367 		dev_err(isp->dev, "Unable to request IRQ\n");
2368 		ret = -EINVAL;
2369 		goto error_iommu;
2370 	}
2371 
2372 	/* Entities */
2373 	ret = isp_initialize_modules(isp);
2374 	if (ret < 0)
2375 		goto error_iommu;
2376 
2377 	ret = isp_register_entities(isp);
2378 	if (ret < 0)
2379 		goto error_modules;
2380 
2381 	ret = isp_create_links(isp);
2382 	if (ret < 0)
2383 		goto error_register_entities;
2384 
2385 	isp->notifier.ops = &isp_subdev_notifier_ops;
2386 
2387 	v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);
2388 
2389 	ret = isp_parse_of_endpoints(isp);
2390 	if (ret < 0)
2391 		goto error_register_entities;
2392 
2393 	ret = v4l2_async_nf_register(&isp->notifier);
2394 	if (ret)
2395 		goto error_register_entities;
2396 
2397 	isp_core_init(isp, 1);
2398 	omap3isp_put(isp);
2399 
2400 	return 0;
2401 
2402 error_register_entities:
2403 	v4l2_async_nf_cleanup(&isp->notifier);
2404 	isp_unregister_entities(isp);
2405 error_modules:
2406 	isp_cleanup_modules(isp);
2407 error_iommu:
2408 	isp_detach_iommu(isp);
2409 error_isp:
2410 	isp_xclk_cleanup(isp);
2411 	__omap3isp_put(isp, false);
2412 error:
2413 	mutex_destroy(&isp->isp_mutex);
2414 error_release_isp:
2415 	kfree(isp);
2416 
2417 	return ret;
2418 }
2419 
2420 static const struct dev_pm_ops omap3isp_pm_ops = {
2421 	.prepare = isp_pm_prepare,
2422 	.suspend = isp_pm_suspend,
2423 	.resume = isp_pm_resume,
2424 	.complete = isp_pm_complete,
2425 };
2426 
2427 static const struct platform_device_id omap3isp_id_table[] = {
2428 	{ "omap3isp", 0 },
2429 	{ },
2430 };
2431 MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
2432 
2433 static const struct of_device_id omap3isp_of_table[] = {
2434 	{ .compatible = "ti,omap3-isp" },
2435 	{ },
2436 };
2437 MODULE_DEVICE_TABLE(of, omap3isp_of_table);
2438 
2439 static struct platform_driver omap3isp_driver = {
2440 	.probe = isp_probe,
2441 	.remove = isp_remove,
2442 	.id_table = omap3isp_id_table,
2443 	.driver = {
2444 		.name = "omap3isp",
2445 		.pm	= &omap3isp_pm_ops,
2446 		.of_match_table = omap3isp_of_table,
2447 	},
2448 };
2449 
2450 module_platform_driver(omap3isp_driver);
2451 
2452 MODULE_AUTHOR("Nokia Corporation");
2453 MODULE_DESCRIPTION("TI OMAP3 ISP driver");
2454 MODULE_LICENSE("GPL");
2455 MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
2456