// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI Camera Access Layer (CAL) - CAMERARX
 *
 * Copyright (c) 2015-2020 Texas Instruments Inc.
 *
 * Authors:
 *	Benoit Parrot <bparrot@ti.com>
 *	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#include "cal.h"
#include "cal_regs.h"

/* ------------------------------------------------------------------
 *	I/O Register Accessors
 * ------------------------------------------------------------------
 */

static inline u32 camerarx_read(struct cal_camerarx *phy, u32 offset)
{
	return ioread32(phy->base + offset);
}

static inline void camerarx_write(struct cal_camerarx *phy, u32 offset, u32 val)
{
	iowrite32(val, phy->base + offset);
}

/* ------------------------------------------------------------------
 *	CAMERARX Management
 * ------------------------------------------------------------------
 */

static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy)
{
	struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2;
	u32 num_lanes = mipi_csi2->num_data_lanes;
	struct v4l2_subdev_state *state;
	u32 bpp;
	s64 freq;

	/*
	 * v4l2_get_link_freq() uses V4L2_CID_LINK_FREQ first, and falls back
	 * to V4L2_CID_PIXEL_RATE if V4L2_CID_LINK_FREQ is not available.
	 *
	 * With multistream input there is no single pixel rate, and thus we
	 * cannot use V4L2_CID_PIXEL_RATE, so we pass 0 as the bpp which
	 * causes v4l2_get_link_freq() to return an error if it falls back to
	 * V4L2_CID_PIXEL_RATE.
	 */
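	/*
	 * Illustrative numbers (not from this driver): for a single-stream
	 * source that only exposes a V4L2_CID_PIXEL_RATE of 74.25 MHz, with
	 * a 16 bpp format on 2 data lanes, the fallback computes
	 * 74.25 MHz * 16 / (2 * 2) = 297 MHz as the link frequency.
	 */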

	state = v4l2_subdev_get_locked_active_state(&phy->subdev);

	if (state->routing.num_routes > 1) {
		bpp = 0;
	} else {
		struct v4l2_subdev_route *route = &state->routing.routes[0];
		const struct cal_format_info *fmtinfo;
		struct v4l2_mbus_framefmt *fmt;

		fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
						   route->sink_stream);

		fmtinfo = cal_format_by_code(fmt->code);
		if (!fmtinfo)
			return -EINVAL;

		bpp = fmtinfo->bpp;
	}

	freq = v4l2_get_link_freq(&phy->source->entity.pads[phy->source_pad],
				  bpp, 2 * num_lanes);
	if (freq < 0) {
		phy_err(phy, "failed to get link freq for subdev '%s'\n",
			phy->source->name);
		return freq;
	}

	phy_dbg(3, phy, "Source Link Freq: %llu\n", freq);

	return freq;
}

static void cal_camerarx_lane_config(struct cal_camerarx *phy)
{
	u32 val = cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance));
	u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
	u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
	struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 =
		&phy->endpoint.bus.mipi_csi2;
	int lane;

	cal_set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
	cal_set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
	for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
		/*
		 * Each lane occupies one nibble, starting with the clock
		 * lane and followed by the data lanes, so shift the masks
		 * by 4 for every lane.
		 */
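		/*
		 * For example, with "clock-lanes = <0>" and
		 * "data-lanes = <1 2>" in DT, position value 1 (lane + 1) is
		 * written to the clock position field and values 2 and 3 to
		 * the two following data lane position fields.
		 */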
		lane_mask <<= 4;
		polarity_mask <<= 4;
		cal_set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
		cal_set_field(&val, mipi_csi2->lane_polarities[lane + 1],
			      polarity_mask);
	}

	cal_write(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), val);
	phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
		phy->instance, val);
}

static void cal_camerarx_enable(struct cal_camerarx *phy)
{
	u32 num_lanes = phy->cal->data->camerarx[phy->instance].num_lanes;

	regmap_field_write(phy->fields[F_CAMMODE], 0);
	/* Always enable all lanes at the phy control level */
	regmap_field_write(phy->fields[F_LANEENABLE], (1 << num_lanes) - 1);
	/* F_CSI_MODE is not present on every architecture */
	if (phy->fields[F_CSI_MODE])
		regmap_field_write(phy->fields[F_CSI_MODE], 1);
	regmap_field_write(phy->fields[F_CTRLCLKEN], 1);
}

void cal_camerarx_disable(struct cal_camerarx *phy)
{
	regmap_field_write(phy->fields[F_CTRLCLKEN], 0);
}

/*
 * TCLK values are OK at their reset values
 */
#define TCLK_TERM	0
#define TCLK_MISS	1
#define TCLK_SETTLE	14

static void cal_camerarx_config(struct cal_camerarx *phy, s64 link_freq)
{
	unsigned int reg0, reg1;
	unsigned int ths_term, ths_settle;

	/* DPHY timing configuration */

	/* THS_TERM: Programmed value = floor(20 ns/DDRClk period) */
	ths_term = div_s64(20 * link_freq, 1000 * 1000 * 1000);
	phy_dbg(1, phy, "ths_term: %d (0x%02x)\n", ths_term, ths_term);

	/* THS_SETTLE: Programmed value = floor(105 ns/DDRClk period) + 4 */
	ths_settle = div_s64(105 * link_freq, 1000 * 1000 * 1000) + 4;
	phy_dbg(1, phy, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
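	/*
	 * Worked example with an illustrative link frequency of 400 MHz
	 * (an 800 Mbps/lane DDR clock, i.e. a 2.5 ns period):
	 * ths_term = floor(20 ns / 2.5 ns) = 8 and
	 * ths_settle = floor(105 ns / 2.5 ns) + 4 = 46.
	 */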

	reg0 = camerarx_read(phy, CAL_CSI2_PHY_REG0);
	cal_set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
		      CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
	cal_set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
	cal_set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);

	phy_dbg(1, phy, "CSI2_%d_REG0 = 0x%08x\n", phy->instance, reg0);
	camerarx_write(phy, CAL_CSI2_PHY_REG0, reg0);

	reg1 = camerarx_read(phy, CAL_CSI2_PHY_REG1);
	cal_set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
	cal_set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
	cal_set_field(&reg1, TCLK_MISS,
		      CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
	cal_set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);

	phy_dbg(1, phy, "CSI2_%d_REG1 = 0x%08x\n", phy->instance, reg1);
	camerarx_write(phy, CAL_CSI2_PHY_REG1, reg1);
}

static void cal_camerarx_power(struct cal_camerarx *phy, bool enable)
{
	u32 target_state;
	unsigned int i;

	target_state = enable ? CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON :
		       CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF;

	cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance),
			target_state, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);

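	/*
	 * Poll the power status field until it reports the requested state,
	 * for at most ~10 ms (10 iterations of roughly 1 ms each).
	 */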
	for (i = 0; i < 10; i++) {
		u32 current_state;

		current_state = cal_read_field(phy->cal,
					       CAL_CSI2_COMPLEXIO_CFG(phy->instance),
					       CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK);

		if (current_state == target_state)
			break;

		usleep_range(1000, 1100);
	}

	if (i == 10)
		phy_err(phy, "Failed to power %s complexio\n",
			enable ? "up" : "down");
}

static void cal_camerarx_wait_reset(struct cal_camerarx *phy)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(750);
	while (time_before(jiffies, timeout)) {
		if (cal_read_field(phy->cal,
				   CAL_CSI2_COMPLEXIO_CFG(phy->instance),
				   CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) ==
		    CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED)
			break;
		usleep_range(500, 5000);
	}

	if (cal_read_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance),
			   CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) !=
			   CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED)
		phy_err(phy, "Timeout waiting for Complex IO reset done\n");
}

static void cal_camerarx_wait_stop_state(struct cal_camerarx *phy)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(750);
	while (time_before(jiffies, timeout)) {
		if (cal_read_field(phy->cal,
				   CAL_CSI2_TIMING(phy->instance),
				   CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) == 0)
			break;
		usleep_range(500, 5000);
	}

	if (cal_read_field(phy->cal, CAL_CSI2_TIMING(phy->instance),
			   CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) != 0)
		phy_err(phy, "Timeout waiting for stop state\n");
}

static void cal_camerarx_enable_irqs(struct cal_camerarx *phy)
{
	const u32 cio_err_mask =
		CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK |
		CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK |
		CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK |
		CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK;
	const u32 vc_err_mask =
		CAL_CSI2_VC_IRQ_CS_IRQ_MASK(0) |
		CAL_CSI2_VC_IRQ_CS_IRQ_MASK(1) |
		CAL_CSI2_VC_IRQ_CS_IRQ_MASK(2) |
		CAL_CSI2_VC_IRQ_CS_IRQ_MASK(3) |
		CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(0) |
		CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(1) |
		CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(2) |
		CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(3);

	/* Enable CIO & VC error IRQs. */
	cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0),
		  CAL_HL_IRQ_CIO_MASK(phy->instance) |
		  CAL_HL_IRQ_VC_MASK(phy->instance));
	cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance),
		  cio_err_mask);
	cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance),
		  vc_err_mask);
}

static void cal_camerarx_disable_irqs(struct cal_camerarx *phy)
{
	/* Disable CIO error irqs */
	cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(0),
		  CAL_HL_IRQ_CIO_MASK(phy->instance) |
		  CAL_HL_IRQ_VC_MASK(phy->instance));
	cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), 0);
	cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance), 0);
}

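/*
 * Enable the PHY Protocol Interface (PPI): turn on ECC checking of incoming
 * packet headers, then enable the CSI-2 interface itself.
 */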
static void cal_camerarx_ppi_enable(struct cal_camerarx *phy)
{
	cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
			1, CAL_CSI2_PPI_CTRL_ECC_EN_MASK);

	cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
			1, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
}

static void cal_camerarx_ppi_disable(struct cal_camerarx *phy)
{
	cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance),
			0, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
}

static int cal_camerarx_start(struct cal_camerarx *phy, u32 sink_stream)
{
	struct media_pad *remote_pad;
	s64 link_freq;
	u32 sscounter;
	u32 val;
	int ret;

	remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);

	/*
	 * We need to enable the PHY hardware when enabling the first stream,
	 * but for the following streams we just propagate the enable_streams
	 * to the source.
	 */

	if (phy->enable_count > 0) {
		ret = v4l2_subdev_enable_streams(phy->source, remote_pad->index,
						 BIT(sink_stream));
		if (ret) {
			phy_err(phy, "enable streams failed in source: %d\n", ret);
			return ret;
		}

		phy->enable_count++;

		return 0;
	}

	link_freq = cal_camerarx_get_ext_link_freq(phy);
	if (link_freq < 0)
		return link_freq;

	ret = v4l2_subdev_call(phy->source, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
		phy_err(phy, "power on failed in subdev\n");
		return ret;
	}

	cal_camerarx_enable_irqs(phy);

	/*
	 * CSI-2 PHY Link Initialization Sequence, according to the DRA74xP /
	 * DRA75xP / DRA76xP / DRA77xP TRM. The DRA71x / DRA72x and the AM65x /
	 * DRA80xM TRMs have a slightly simplified sequence.
	 */

	/*
	 * 1. Configure all CSI-2 low level protocol registers to be ready to
	 *    receive signals/data from the CSI-2 PHY.
	 *
	 *    i.-v. Configure the lanes position and polarity.
	 */
	cal_camerarx_lane_config(phy);

	/*
	 *    vi.-vii. Configure D-PHY mode, enable the required lanes and
	 *             enable the CAMERARX clock.
	 */
	cal_camerarx_enable(phy);

	/*
	 * 2. CSI PHY and link initialization sequence.
	 *
	 *    a. Deassert the CSI-2 PHY reset. Do not wait for reset completion
	 *       at this point, as it requires the external source to send the
	 *       CSI-2 HS clock.
	 */
	cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance),
			CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
			CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
	phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x De-assert Complex IO Reset\n",
		phy->instance,
		cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance)));

	/* Dummy read to allow SCP reset to complete. */
	camerarx_read(phy, CAL_CSI2_PHY_REG0);

	/* Program the PHY timing parameters. */
	cal_camerarx_config(phy, link_freq);

	/*
	 *    b. Assert the FORCERXMODE signal.
	 *
	 * The stop-state counter is based on fclk cycles, and we always use
	 * the x16 and x4 settings, so stop-state-timeout =
	 * fclk-cycle * 16 * 4 * counter.
	 *
	 * The stop-state timeout must be more than 100 us as per the CSI-2
	 * spec, so we calculate a counter value that yields a timeout of at
	 * least 100 us (rounding up).
	 */
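	/*
	 * For example, with an illustrative 266 MHz functional clock the
	 * counter is DIV_ROUND_UP(266000000, 640000) = 416, giving a timeout
	 * of 416 * 16 * 4 / 266 MHz ~= 100.1 us.
	 */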
	sscounter = DIV_ROUND_UP(clk_get_rate(phy->cal->fclk), 10000 * 16 * 4);

	val = cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance));
	cal_set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
	cal_set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
	cal_set_field(&val, sscounter,
		      CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
	cal_write(phy->cal, CAL_CSI2_TIMING(phy->instance), val);
	phy_dbg(3, phy, "CAL_CSI2_TIMING(%d) = 0x%08x Stop States\n",
		phy->instance,
		cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance)));

	/* Assert the FORCERXMODE signal. */
	cal_write_field(phy->cal, CAL_CSI2_TIMING(phy->instance),
			1, CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
	phy_dbg(3, phy, "CAL_CSI2_TIMING(%d) = 0x%08x Force RXMODE\n",
		phy->instance,
		cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance)));

	/*
	 * c. Connect pull-down on CSI-2 PHY link (using pad control).
	 *
	 * This is not required on DRA71x, DRA72x, AM65x and DRA80xM. Not
	 * implemented.
	 */

	/*
	 * d. Power up the CSI-2 PHY.
	 * e. Check whether the state status reaches the ON state.
	 */
	cal_camerarx_power(phy, true);

	/*
	 * Start the source to enable the CSI-2 HS clock. We can now wait for
	 * CSI-2 PHY reset to complete.
	 */
	ret = v4l2_subdev_enable_streams(phy->source, remote_pad->index,
					 BIT(sink_stream));
	if (ret) {
		v4l2_subdev_call(phy->source, core, s_power, 0);
		cal_camerarx_disable_irqs(phy);
		phy_err(phy, "stream on failed in subdev\n");
		return ret;
	}

	cal_camerarx_wait_reset(phy);

	/* f. Wait for STOPSTATE=1 for all enabled lane modules. */
	cal_camerarx_wait_stop_state(phy);

	phy_dbg(1, phy, "CSI2_%u_REG1 = 0x%08x (bits 31-28 should be set)\n",
		phy->instance, camerarx_read(phy, CAL_CSI2_PHY_REG1));

	/*
	 * g. Disable pull-down on CSI-2 PHY link (using pad control).
	 *
	 * This is not required on DRA71x, DRA72x, AM65x and DRA80xM. Not
	 * implemented.
	 */

	/* Finally, enable the PHY Protocol Interface (PPI). */
	cal_camerarx_ppi_enable(phy);

	phy->enable_count++;

	return 0;
}

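/*
 * Counterpart of cal_camerarx_start(): drop the enable count, and only tear
 * down and power off the PHY when the last enabled stream is stopped.
 */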
static void cal_camerarx_stop(struct cal_camerarx *phy, u32 sink_stream)
{
	struct media_pad *remote_pad;
	int ret;

	remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);

	if (--phy->enable_count > 0) {
		ret = v4l2_subdev_disable_streams(phy->source,
						  remote_pad->index,
						  BIT(sink_stream));
		if (ret)
			phy_err(phy, "stream off failed in subdev\n");

		return;
	}

	cal_camerarx_ppi_disable(phy);

	cal_camerarx_disable_irqs(phy);

	cal_camerarx_power(phy, false);

	/* Assert Complex IO Reset */
	cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance),
			CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL,
			CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);

	phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO in Reset\n",
		phy->instance,
		cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance)));

	/* Disable the phy */
	cal_camerarx_disable(phy);

	ret = v4l2_subdev_disable_streams(phy->source, remote_pad->index,
					  BIT(sink_stream));
	if (ret)
		phy_err(phy, "stream off failed in subdev\n");

	ret = v4l2_subdev_call(phy->source, core, s_power, 0);
	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
		phy_err(phy, "power off failed in subdev\n");
}

/*
 *   Errata i913: CSI2 LDO Needs to be disabled when module is powered on
 *
 *   Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2
 *   LDOs on the device are disabled if CSI-2 module is powered on
 *   (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304
 *   | 0x4845 B384 [28:27] = 0x2) mode. Common concerns include: high
 *   current draw on the module supply in active mode.
 *
 *   Errata does not apply when CSI-2 module is powered off
 *   (0x4845 B304 | 0x4845 B384 [28:27] = 0x0).
 *
 * SW Workaround:
 *	Set the following register bits to disable the LDO,
 *	which is essentially CSI2 REG10 bit 6:
 *
 *		Core 0:  0x4845 B828 = 0x0000 0040
 *		Core 1:  0x4845 B928 = 0x0000 0040
 */
void cal_camerarx_i913_errata(struct cal_camerarx *phy)
{
	u32 reg10 = camerarx_read(phy, CAL_CSI2_PHY_REG10);

	cal_set_field(&reg10, 1, CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK);

	phy_dbg(1, phy, "CSI2_%d_REG10 = 0x%08x\n", phy->instance, reg10);
	camerarx_write(phy, CAL_CSI2_PHY_REG10, reg10);
}

static int cal_camerarx_regmap_init(struct cal_dev *cal,
				    struct cal_camerarx *phy)
{
	const struct cal_camerarx_data *phy_data;
	unsigned int i;

	if (!cal->data)
		return -EINVAL;

	phy_data = &cal->data->camerarx[phy->instance];

	for (i = 0; i < F_MAX_FIELDS; i++) {
		struct reg_field field = {
			.reg = cal->syscon_camerrx_offset,
			.lsb = phy_data->fields[i].lsb,
			.msb = phy_data->fields[i].msb,
		};

		/*
		 * The register offset comes from the syscon node referenced
		 * in DT, while the field positions come from the per-SoC
		 * CAMERARX data.
		 */
		phy->fields[i] = devm_regmap_field_alloc(cal->dev,
							 cal->syscon_camerrx,
							 field);
		if (IS_ERR(phy->fields[i])) {
			cal_err(cal, "Unable to allocate regmap fields\n");
			return PTR_ERR(phy->fields[i]);
		}
	}

	return 0;
}

static int cal_camerarx_parse_dt(struct cal_camerarx *phy)
{
	struct v4l2_fwnode_endpoint *endpoint = &phy->endpoint;
	char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES * 2];
	struct device_node *ep_node;
	unsigned int i;
	int ret;

	/*
	 * Find the endpoint node for the port corresponding to the PHY
	 * instance, and parse its CSI-2-related properties.
	 */
	ep_node = of_graph_get_endpoint_by_regs(phy->cal->dev->of_node,
						phy->instance, 0);
	if (!ep_node) {
		/*
		 * The endpoint is not mandatory, as not all PHY instances
		 * need to be connected in DT.
		 */
		phy_dbg(3, phy, "Port has no endpoint\n");
		return 0;
	}

	endpoint->bus_type = V4L2_MBUS_CSI2_DPHY;
	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint);
	if (ret < 0) {
		phy_err(phy, "Failed to parse endpoint\n");
		goto done;
	}

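	/*
	 * Validate the lane positions and build a human-readable list of them
	 * for the debug message below, e.g. "data-lanes = <1 2>" in DT
	 * becomes the string "1 2".
	 */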
	for (i = 0; i < endpoint->bus.mipi_csi2.num_data_lanes; i++) {
		unsigned int lane = endpoint->bus.mipi_csi2.data_lanes[i];

		if (lane > 4) {
			phy_err(phy, "Invalid position %u for data lane %u\n",
				lane, i);
			ret = -EINVAL;
			goto done;
		}

		data_lanes[i*2] = '0' + lane;
		data_lanes[i*2+1] = ' ';
	}

	data_lanes[i*2-1] = '\0';

	phy_dbg(3, phy,
		"CSI-2 bus: clock lane <%u>, data lanes <%s>, flags 0x%08x\n",
		endpoint->bus.mipi_csi2.clock_lane, data_lanes,
		endpoint->bus.mipi_csi2.flags);

	/* Retrieve the connected device and store it for later use. */
	phy->source_ep_node = of_graph_get_remote_endpoint(ep_node);
	phy->source_node = of_graph_get_port_parent(phy->source_ep_node);
	if (!phy->source_node) {
		phy_dbg(3, phy, "Can't get remote parent\n");
		of_node_put(phy->source_ep_node);
		ret = -EINVAL;
		goto done;
	}

	phy_dbg(1, phy, "Found connected device %pOFn\n", phy->source_node);

done:
	of_node_put(ep_node);
	return ret;
}

/* ------------------------------------------------------------------
 *	V4L2 Subdev Operations
 * ------------------------------------------------------------------
 */

static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd)
{
	return container_of(sd, struct cal_camerarx, subdev);
}

struct cal_camerarx *
cal_camerarx_get_phy_from_entity(struct media_entity *entity)
{
	struct v4l2_subdev *sd;

	sd = media_entity_to_v4l2_subdev(entity);
	if (!sd)
		return NULL;

	return to_cal_camerarx(sd);
}

static int cal_camerarx_sd_enable_streams(struct v4l2_subdev *sd,
					  struct v4l2_subdev_state *state,
					  u32 pad, u64 streams_mask)
{
	struct cal_camerarx *phy = to_cal_camerarx(sd);
	u32 sink_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, 0,
						    NULL, &sink_stream);
	if (ret)
		return ret;

	return cal_camerarx_start(phy, sink_stream);
}

static int cal_camerarx_sd_disable_streams(struct v4l2_subdev *sd,
					   struct v4l2_subdev_state *state,
					   u32 pad, u64 streams_mask)
{
	struct cal_camerarx *phy = to_cal_camerarx(sd);
	u32 sink_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, 0,
						    NULL, &sink_stream);
	if (ret)
		return ret;

	cal_camerarx_stop(phy, sink_stream);

	return 0;
}

static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
					  struct v4l2_subdev_state *state,
					  struct v4l2_subdev_mbus_code_enum *code)
{
	/* No transcoding, source and sink codes must match. */
	if (cal_rx_pad_is_source(code->pad)) {
		struct v4l2_mbus_framefmt *fmt;

		if (code->index > 0)
			return -EINVAL;

		fmt = v4l2_subdev_state_get_opposite_stream_format(state,
								   code->pad,
								   code->stream);
		if (!fmt)
			return -EINVAL;

		code->code = fmt->code;
	} else {
		if (code->index >= cal_num_formats)
			return -EINVAL;

		code->code = cal_formats[code->index].code;
	}

	return 0;
}

static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
					   struct v4l2_subdev_state *state,
					   struct v4l2_subdev_frame_size_enum *fse)
{
	const struct cal_format_info *fmtinfo;

	if (fse->index > 0)
		return -EINVAL;

	/* No transcoding, source and sink formats must match. */
	if (cal_rx_pad_is_source(fse->pad)) {
		struct v4l2_mbus_framefmt *fmt;

		fmt = v4l2_subdev_state_get_opposite_stream_format(state,
								   fse->pad,
								   fse->stream);
		if (!fmt)
			return -EINVAL;

		if (fse->code != fmt->code)
			return -EINVAL;

		fse->min_width = fmt->width;
		fse->max_width = fmt->width;
		fse->min_height = fmt->height;
		fse->max_height = fmt->height;
	} else {
		fmtinfo = cal_format_by_code(fse->code);
		if (!fmtinfo)
			return -EINVAL;

		fse->min_width = CAL_MIN_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8);
		fse->max_width = CAL_MAX_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8);
		fse->min_height = CAL_MIN_HEIGHT_LINES;
		fse->max_height = CAL_MAX_HEIGHT_LINES;
	}

	return 0;
}

static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_format *format)
{
	const struct cal_format_info *fmtinfo;
	struct v4l2_mbus_framefmt *fmt;
	unsigned int bpp;

	/* No transcoding, source and sink formats must match. */
	if (cal_rx_pad_is_source(format->pad))
		return v4l2_subdev_get_fmt(sd, state, format);

	/*
	 * Default to the first format if the requested media bus code isn't
	 * supported.
	 */
	fmtinfo = cal_format_by_code(format->format.code);
	if (!fmtinfo)
		fmtinfo = &cal_formats[0];

	/* Clamp the size, update the code. The colorspace is accepted as-is. */
	bpp = ALIGN(fmtinfo->bpp, 8);

	format->format.width = clamp_t(unsigned int, format->format.width,
				       CAL_MIN_WIDTH_BYTES * 8 / bpp,
				       CAL_MAX_WIDTH_BYTES * 8 / bpp);
	format->format.height = clamp_t(unsigned int, format->format.height,
					CAL_MIN_HEIGHT_LINES,
					CAL_MAX_HEIGHT_LINES);
	format->format.code = fmtinfo->code;
	format->format.field = V4L2_FIELD_NONE;

	/* Store the format and propagate it to the source pad. */

	fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
	if (!fmt)
		return -EINVAL;

	*fmt = format->format;

	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
							   format->stream);
	if (!fmt)
		return -EINVAL;

	*fmt = format->format;

	return 0;
}

static int cal_camerarx_set_routing(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_krouting *routing)
{
	static const struct v4l2_mbus_framefmt format = {
		.width = 640,
		.height = 480,
		.code = MEDIA_BUS_FMT_UYVY8_1X16,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.ycbcr_enc = V4L2_YCBCR_ENC_601,
		.quantization = V4L2_QUANTIZATION_LIM_RANGE,
		.xfer_func = V4L2_XFER_FUNC_SRGB,
	};
	int ret;

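	/*
	 * Only allow non-overlapping 1-to-1 routes (no stream duplication or
	 * merging) and do not allow several streams to be multiplexed on a
	 * single source pad.
	 */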
	ret = v4l2_subdev_routing_validate(sd, routing,
					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
					   V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING);
	if (ret)
		return ret;

	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
	if (ret)
		return ret;

	return 0;
}

static int cal_camerarx_sd_set_routing(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       enum v4l2_subdev_format_whence which,
				       struct v4l2_subdev_krouting *routing)
{
	return cal_camerarx_set_routing(sd, state, routing);
}

static int cal_camerarx_sd_init_state(struct v4l2_subdev *sd,
				      struct v4l2_subdev_state *state)
{
	struct v4l2_subdev_route routes[] = { {
		.sink_pad = 0,
		.sink_stream = 0,
		.source_pad = 1,
		.source_stream = 0,
		.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
	} };

	struct v4l2_subdev_krouting routing = {
		.num_routes = 1,
		.routes = routes,
	};

	/* Initialize routing to a single route to the first source pad. */
	return cal_camerarx_set_routing(sd, state, &routing);
}
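
/*
 * Build a frame descriptor for a CAMERARX source pad by reducing the remote
 * source subdev's descriptor to the single entry that feeds the requested
 * stream.
 */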
static int cal_camerarx_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
				       struct v4l2_mbus_frame_desc *fd)
{
	struct cal_camerarx *phy = to_cal_camerarx(sd);
	struct v4l2_mbus_frame_desc remote_desc;
	const struct media_pad *remote_pad;
	struct v4l2_subdev_state *state;
	u32 sink_stream;
	unsigned int i;
	int ret;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
						    pad, 0,
						    NULL, &sink_stream);
	if (ret)
		goto out_unlock;

	remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);
	if (!remote_pad) {
		ret = -EPIPE;
		goto out_unlock;
	}

	ret = v4l2_subdev_call(phy->source, pad, get_frame_desc,
			       remote_pad->index, &remote_desc);
	if (ret)
		goto out_unlock;

	if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
		cal_err(phy->cal,
			"Frame descriptor does not describe CSI-2 link");
		ret = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < remote_desc.num_entries; i++) {
		if (remote_desc.entry[i].stream == sink_stream)
			break;
	}

	if (i == remote_desc.num_entries) {
		cal_err(phy->cal, "Stream %u not found in remote frame desc\n",
			sink_stream);
		ret = -EINVAL;
		goto out_unlock;
	}

	fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
	fd->num_entries = 1;
	fd->entry[0] = remote_desc.entry[i];

out_unlock:
	v4l2_subdev_unlock_state(state);

	return ret;
}

static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = {
	.enable_streams = cal_camerarx_sd_enable_streams,
	.disable_streams = cal_camerarx_sd_disable_streams,
	.enum_mbus_code = cal_camerarx_sd_enum_mbus_code,
	.enum_frame_size = cal_camerarx_sd_enum_frame_size,
	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = cal_camerarx_sd_set_fmt,
	.set_routing = cal_camerarx_sd_set_routing,
	.get_frame_desc = cal_camerarx_get_frame_desc,
};

static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = {
	.pad = &cal_camerarx_pad_ops,
};

static const struct v4l2_subdev_internal_ops cal_camerarx_internal_ops = {
	.init_state = cal_camerarx_sd_init_state,
};

static const struct media_entity_operations cal_camerarx_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
	.has_pad_interdep = v4l2_subdev_has_pad_interdep,
};

/* ------------------------------------------------------------------
 *	Create and Destroy
 * ------------------------------------------------------------------
 */

struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
					 unsigned int instance)
{
	struct platform_device *pdev = to_platform_device(cal->dev);
	struct cal_camerarx *phy;
	struct v4l2_subdev *sd;
	unsigned int i;
	int ret;

	phy = devm_kzalloc(cal->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	phy->cal = cal;
	phy->instance = instance;

	spin_lock_init(&phy->vc_lock);

	phy->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						(instance == 0) ?
						"cal_rx_core0" :
						"cal_rx_core1");
	phy->base = devm_ioremap_resource(cal->dev, phy->res);
	if (IS_ERR(phy->base)) {
		cal_err(cal, "failed to ioremap\n");
		return ERR_CAST(phy->base);
	}

	cal_dbg(1, cal, "ioresource %s at %pa - %pa\n",
		phy->res->name, &phy->res->start, &phy->res->end);

	ret = cal_camerarx_regmap_init(cal, phy);
	if (ret)
		return ERR_PTR(ret);

	ret = cal_camerarx_parse_dt(phy);
	if (ret)
		return ERR_PTR(ret);

	/* Initialize the V4L2 subdev and media entity. */
	sd = &phy->subdev;
	v4l2_subdev_init(sd, &cal_camerarx_subdev_ops);
	sd->internal_ops = &cal_camerarx_internal_ops;
	sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
	snprintf(sd->name, sizeof(sd->name), "CAMERARX%u", instance);
	sd->dev = cal->dev;

	phy->pads[CAL_CAMERARX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	for (i = CAL_CAMERARX_PAD_FIRST_SOURCE; i < CAL_CAMERARX_NUM_PADS; ++i)
		phy->pads[i].flags = MEDIA_PAD_FL_SOURCE;
	sd->entity.ops = &cal_camerarx_media_ops;
	ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(phy->pads),
				     phy->pads);
	if (ret)
		goto err_node_put;

	ret = v4l2_subdev_init_finalize(sd);
	if (ret)
		goto err_entity_cleanup;

	ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd);
	if (ret)
		goto err_free_state;

	return phy;

err_free_state:
	v4l2_subdev_cleanup(sd);
err_entity_cleanup:
	media_entity_cleanup(&phy->subdev.entity);
err_node_put:
	of_node_put(phy->source_ep_node);
	of_node_put(phy->source_node);
	return ERR_PTR(ret);
}

void cal_camerarx_destroy(struct cal_camerarx *phy)
{
	if (!phy)
		return;

	v4l2_device_unregister_subdev(&phy->subdev);
	v4l2_subdev_cleanup(&phy->subdev);
	media_entity_cleanup(&phy->subdev.entity);
	of_node_put(phy->source_ep_node);
	of_node_put(phy->source_node);
}
1047