1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * RP1 Camera Front End Driver
4 *
5 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
6 * Copyright (c) 2023-2024 Ideas on Board Oy
7 */
8
9 #include <linux/clk.h>
10 #include <linux/debugfs.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/lcm.h>
19 #include <linux/math.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 #include <linux/videodev2.h>
28
29 #include <media/v4l2-async.h>
30 #include <media/v4l2-common.h>
31 #include <media/v4l2-ctrls.h>
32 #include <media/v4l2-dev.h>
33 #include <media/v4l2-device.h>
34 #include <media/v4l2-event.h>
35 #include <media/v4l2-fwnode.h>
36 #include <media/v4l2-ioctl.h>
37 #include <media/v4l2-mc.h>
38 #include <media/videobuf2-dma-contig.h>
39
40 #include <linux/media/raspberrypi/pisp_fe_config.h>
41 #include <linux/media/raspberrypi/pisp_fe_statistics.h>
42
43 #include "cfe-fmts.h"
44 #include "cfe.h"
45 #include "csi2.h"
46 #include "pisp-fe.h"
47
48 #define CREATE_TRACE_POINTS
49 #include "cfe-trace.h"
50
51 #define CFE_MODULE_NAME "rp1-cfe"
52 #define CFE_VERSION "1.0"
53
54 #define cfe_dbg(cfe, fmt, arg...) dev_dbg(&(cfe)->pdev->dev, fmt, ##arg)
55 #define cfe_info(cfe, fmt, arg...) dev_info(&(cfe)->pdev->dev, fmt, ##arg)
56 #define cfe_err(cfe, fmt, arg...) dev_err(&(cfe)->pdev->dev, fmt, ##arg)
57
58 /* MIPICFG registers */
59 #define MIPICFG_CFG 0x004
60 #define MIPICFG_INTR 0x028
61 #define MIPICFG_INTE 0x02c
62 #define MIPICFG_INTF 0x030
63 #define MIPICFG_INTS 0x034
64
65 #define MIPICFG_CFG_SEL_CSI BIT(0)
66
67 #define MIPICFG_INT_CSI_DMA BIT(0)
68 #define MIPICFG_INT_CSI_HOST BIT(2)
69 #define MIPICFG_INT_PISP_FE BIT(4)
70
71 #define BPL_ALIGNMENT 16
72 #define MAX_BYTESPERLINE 0xffffff00
73 #define MAX_BUFFER_SIZE 0xffffff00
74 /*
75 * Max width is therefore determined by the max stride divided by the number of
76 * bits per pixel.
77 *
78 * However, to avoid overflow issues let's use a 16k maximum. This lets us
79 * calculate 16k * 16k * 4 with 32bits. If we need higher maximums, a careful
80 * review and adjustment of the code is needed so that it will deal with
81 * overflows correctly.
82 */
83 #define MAX_WIDTH 16384
84 #define MAX_HEIGHT MAX_WIDTH
85 /* Define a nominal minimum image size */
86 #define MIN_WIDTH 16
87 #define MIN_HEIGHT 16
88
89 #define MIN_META_WIDTH 4
90 #define MIN_META_HEIGHT 1
91
/* Default media bus format: 640x480, 10-bit Bayer RGGB, raw colorspace. */
const struct v4l2_mbus_framefmt cfe_default_format = {
	.width = 640,
	.height = 480,
	.code = MEDIA_BUS_FMT_SRGGB10_1X10,
	.field = V4L2_FIELD_NONE,
	.colorspace = V4L2_COLORSPACE_RAW,
	.ycbcr_enc = V4L2_YCBCR_ENC_601,
	.quantization = V4L2_QUANTIZATION_FULL_RANGE,
	.xfer_func = V4L2_XFER_FUNC_NONE,
};
102
/*
 * Identifiers for the driver's video device nodes.
 *
 * The ordering matters: the four CSI2 channel nodes must come first and the
 * FE nodes must follow, since is_fe_node() tests (id >= FE_OUT0) and the
 * per-node loops split the ranges at CSI2_NUM_CHANNELS.
 */
enum node_ids {
	/* CSI2 HW output nodes first. */
	CSI2_CH0,
	CSI2_CH1,
	CSI2_CH2,
	CSI2_CH3,
	/* FE only nodes from here on. */
	FE_OUT0,
	FE_OUT1,
	FE_STATS,
	FE_CONFIG,
	NUM_NODES
};
116
/* Static description of one video device node (see node_desc[] below). */
struct node_description {
	/* Node identifier, must match the array index in node_desc[]. */
	enum node_ids id;
	/* Suffix used for the video device name. */
	const char *name;
	/* V4L2 capability flags (V4L2_CAP_*) the node advertises. */
	unsigned int caps;
	/* Media pad flags for the node's single pad. */
	unsigned int pad_flags;
	/* Remote (CSI2 or FE) pad this node links to. */
	unsigned int link_pad;
};
124
125 /* Must match the ordering of enum ids */
/* Per-node static configuration. Must match the ordering of enum node_ids. */
static const struct node_description node_desc[NUM_NODES] = {
	[CSI2_CH0] = {
		.name = "csi2-ch0",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 0
	},
	/*
	 * At the moment the main userspace component (libcamera) doesn't
	 * support metadata with video nodes that support both video and
	 * metadata. So for the time being this node is set to only support
	 * V4L2_CAP_META_CAPTURE.
	 */
	[CSI2_CH1] = {
		.name = "csi2-ch1",
		.caps = V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 1
	},
	[CSI2_CH2] = {
		.name = "csi2-ch2",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 2
	},
	[CSI2_CH3] = {
		.name = "csi2-ch3",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 3
	},
	[FE_OUT0] = {
		.name = "fe-image0",
		.caps = V4L2_CAP_VIDEO_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_OUTPUT0_PAD
	},
	[FE_OUT1] = {
		.name = "fe-image1",
		.caps = V4L2_CAP_VIDEO_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_OUTPUT1_PAD
	},
	[FE_STATS] = {
		.name = "fe-stats",
		.caps = V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_STATS_PAD
	},
	/* The FE config node is the only output (userspace -> driver) node. */
	[FE_CONFIG] = {
		.name = "fe-config",
		.caps = V4L2_CAP_META_OUTPUT,
		.pad_flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_CONFIG_PAD
	},
};
182
183 #define is_fe_node(node) (((node)->id) >= FE_OUT0)
184 #define is_csi2_node(node) (!is_fe_node(node))
185
186 #define node_supports_image_output(node) \
187 (node_desc[(node)->id].caps & V4L2_CAP_VIDEO_CAPTURE)
188 #define node_supports_meta_output(node) \
189 (node_desc[(node)->id].caps & V4L2_CAP_META_CAPTURE)
190 #define node_supports_image_input(node) \
191 (node_desc[(node)->id].caps & V4L2_CAP_VIDEO_OUTPUT)
192 #define node_supports_meta_input(node) \
193 (node_desc[(node)->id].caps & V4L2_CAP_META_OUTPUT)
194 #define node_supports_image(node) \
195 (node_supports_image_output(node) || node_supports_image_input(node))
196 #define node_supports_meta(node) \
197 (node_supports_meta_output(node) || node_supports_meta_input(node))
198
199 #define is_image_output_node(node) \
200 ((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
201 #define is_image_input_node(node) \
202 ((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
203 #define is_image_node(node) \
204 (is_image_output_node(node) || is_image_input_node(node))
205 #define is_meta_output_node(node) \
206 ((node)->buffer_queue.type == V4L2_BUF_TYPE_META_CAPTURE)
207 #define is_meta_input_node(node) \
208 ((node)->buffer_queue.type == V4L2_BUF_TYPE_META_OUTPUT)
209 #define is_meta_node(node) \
210 (is_meta_output_node(node) || is_meta_input_node(node))
211
212 /* To track state across all nodes. */
213 #define NODE_REGISTERED BIT(0)
214 #define NODE_ENABLED BIT(1)
215 #define NODE_STREAMING BIT(2)
216 #define FS_INT BIT(3)
217 #define FE_INT BIT(4)
218 #define NUM_STATES 5
219
/* Driver wrapper around a vb2 buffer, with a list head for the DMA queue. */
struct cfe_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

/*
 * FE_CONFIG buffers additionally carry a kernel-side copy of the FE
 * configuration (filled from the vb2 plane in cfe_buffer_prepare()).
 */
struct cfe_config_buffer {
	struct cfe_buffer buf;
	struct pisp_fe_config config;
};
229
/* Return the cfe_buffer embedding the given vb2 buffer. */
static inline struct cfe_buffer *to_cfe_buffer(struct vb2_buffer *vb)
{
	return container_of(vb, struct cfe_buffer, vb.vb2_buf);
}
234
/* Return the cfe_config_buffer embedding @buf; valid for FE_CONFIG buffers only. */
static inline
struct cfe_config_buffer *to_cfe_config_buffer(struct cfe_buffer *buf)
{
	return container_of(buf, struct cfe_config_buffer, buf);
}
240
/* Per-node runtime state; one instance per entry of enum node_ids. */
struct cfe_node {
	/* Node id */
	enum node_ids id;
	/* Pointer pointing to current v4l2_buffer (being filled by HW) */
	struct cfe_buffer *cur_frm;
	/* Pointer pointing to next v4l2_buffer (programmed for the next frame) */
	struct cfe_buffer *next_frm;
	/* Used to store current pixel format */
	struct v4l2_format vid_fmt;
	/* Used to store current meta format */
	struct v4l2_format meta_fmt;
	/* Buffer queue used in video-buf */
	struct vb2_queue buffer_queue;
	/* Queue of filled frames */
	struct list_head dma_queue;
	/* lock used to access this structure */
	struct mutex lock;
	/* Identifies video device for this channel */
	struct video_device video_dev;
	/* Pointer to the parent handle */
	struct cfe_device *cfe;
	/* Media pad for this node */
	struct media_pad pad;
	/* Frame-start counter (incremented in cfe_sof_isr()) */
	unsigned int fs_count;
	/* Timestamp of the current buffer */
	u64 ts;
};
269
/* Top-level device state for one CFE instance. */
struct cfe_device {
	struct dentry *debugfs;
	struct kref kref;

	/* peripheral base address */
	void __iomem *mipi_cfg_base;

	struct clk *clk;

	/* V4l2 device */
	struct v4l2_device v4l2_dev;
	struct media_device mdev;
	struct media_pipeline pipe;

	/* IRQ lock for node state and DMA queues */
	spinlock_t state_lock;
	/* Both job flags are accessed under state_lock. */
	bool job_ready;
	bool job_queued;

	/* parent device */
	struct platform_device *pdev;
	/* subdevice async Notifier */
	struct v4l2_async_notifier notifier;

	/* Source sub device */
	struct v4l2_subdev *source_sd;
	/* Source subdev's pad */
	u32 source_pad;

	struct cfe_node node[NUM_NODES];
	DECLARE_BITMAP(node_flags, NUM_STATES * NUM_NODES);

	struct csi2_device csi2;
	struct pisp_fe_device fe;

	/* CSI2 channel feeding the FE, or -1 when the FE is not in use. */
	int fe_csi2_channel;

	/* Mask of enabled streams */
	u64 streams_mask;
};
310
/* True when a CSI2 channel has been routed to the front end. */
static inline bool is_fe_enabled(struct cfe_device *cfe)
{
	return cfe->fe_csi2_channel != -1;
}
315
/* Return the cfe_device embedding the given v4l2_device. */
static inline struct cfe_device *to_cfe_device(struct v4l2_device *v4l2_dev)
{
	return container_of(v4l2_dev, struct cfe_device, v4l2_dev);
}
320
/* Read a 32-bit MIPICFG register at the given byte offset. */
static inline u32 cfg_reg_read(struct cfe_device *cfe, u32 offset)
{
	return readl(cfe->mipi_cfg_base + offset);
}
325
/* Write a 32-bit MIPICFG register at the given byte offset. */
static inline void cfg_reg_write(struct cfe_device *cfe, u32 offset, u32 val)
{
	writel(val, cfe->mipi_cfg_base + offset);
}
330
check_state(struct cfe_device * cfe,unsigned long state,unsigned int node_id)331 static bool check_state(struct cfe_device *cfe, unsigned long state,
332 unsigned int node_id)
333 {
334 unsigned long bit;
335
336 for_each_set_bit(bit, &state, sizeof(state)) {
337 if (!test_bit(bit + (node_id * NUM_STATES), cfe->node_flags))
338 return false;
339 }
340
341 return true;
342 }
343
set_state(struct cfe_device * cfe,unsigned long state,unsigned int node_id)344 static void set_state(struct cfe_device *cfe, unsigned long state,
345 unsigned int node_id)
346 {
347 unsigned long bit;
348
349 for_each_set_bit(bit, &state, sizeof(state))
350 set_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
351 }
352
clear_state(struct cfe_device * cfe,unsigned long state,unsigned int node_id)353 static void clear_state(struct cfe_device *cfe, unsigned long state,
354 unsigned int node_id)
355 {
356 unsigned long bit;
357
358 for_each_set_bit(bit, &state, sizeof(state))
359 clear_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
360 }
361
test_any_node(struct cfe_device * cfe,unsigned long cond)362 static bool test_any_node(struct cfe_device *cfe, unsigned long cond)
363 {
364 for (unsigned int i = 0; i < NUM_NODES; i++) {
365 if (check_state(cfe, cond, i))
366 return true;
367 }
368
369 return false;
370 }
371
test_all_nodes(struct cfe_device * cfe,unsigned long precond,unsigned long cond)372 static bool test_all_nodes(struct cfe_device *cfe, unsigned long precond,
373 unsigned long cond)
374 {
375 for (unsigned int i = 0; i < NUM_NODES; i++) {
376 if (check_state(cfe, precond, i)) {
377 if (!check_state(cfe, cond, i))
378 return false;
379 }
380 }
381
382 return true;
383 }
384
/*
 * debugfs show handler: dump the MIPICFG register block. The device is
 * runtime-resumed around the register reads.
 */
static int mipi_cfg_regs_show(struct seq_file *s, void *data)
{
	struct cfe_device *cfe = s->private;
	int ret;

	ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
	if (ret)
		return ret;

#define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", cfg_reg_read(cfe, reg))
	DUMP(MIPICFG_CFG);
	DUMP(MIPICFG_INTR);
	DUMP(MIPICFG_INTE);
	DUMP(MIPICFG_INTF);
	DUMP(MIPICFG_INTS);
#undef DUMP

	pm_runtime_put(&cfe->pdev->dev);

	return 0;
}
406
407 DEFINE_SHOW_ATTRIBUTE(mipi_cfg_regs);
408
409 /* Format setup functions */
/* Look up a format entry by media bus code; NULL if not supported. */
const struct cfe_fmt *find_format_by_code(u32 code)
{
	const struct cfe_fmt *fmt = formats;
	const struct cfe_fmt *end = formats + ARRAY_SIZE(formats);

	for (; fmt < end; fmt++) {
		if (fmt->code == code)
			return fmt;
	}

	return NULL;
}
419
/* Look up a format entry by V4L2 fourcc; NULL if not supported. */
const struct cfe_fmt *find_format_by_pix(u32 pixelformat)
{
	const struct cfe_fmt *fmt = formats;
	const struct cfe_fmt *end = formats + ARRAY_SIZE(formats);

	for (; fmt < end; fmt++) {
		if (fmt->fourcc == pixelformat)
			return fmt;
	}

	return NULL;
}
429
/* Look up a format entry matching both the bus code and the fourcc. */
static const struct cfe_fmt *find_format_by_code_and_fourcc(u32 code,
							    u32 fourcc)
{
	const struct cfe_fmt *fmt = formats;
	const struct cfe_fmt *end = formats + ARRAY_SIZE(formats);

	for (; fmt < end; fmt++) {
		if (fmt->code == code && fmt->fourcc == fourcc)
			return fmt;
	}

	return NULL;
}
440
441 /*
442 * Given the mbus code, find the 16 bit remapped code. Returns 0 if no remap
443 * possible.
444 */
/*
 * Given the mbus code, find the 16 bit remapped code. Returns 0 if no remap
 * possible.
 */
u32 cfe_find_16bit_code(u32 code)
{
	const struct cfe_fmt *fmt = find_format_by_code(code);
	u32 remap_fourcc;

	if (!fmt)
		return 0;

	remap_fourcc = fmt->remap[CFE_REMAP_16BIT];
	if (!remap_fourcc)
		return 0;

	fmt = find_format_by_pix(remap_fourcc);

	return fmt ? fmt->code : 0;
}
460
461 /*
462 * Given the mbus code, find the 8 bit compressed code. Returns 0 if no remap
463 * possible.
464 */
/*
 * Given the mbus code, find the 8 bit compressed code. Returns 0 if no remap
 * possible.
 */
u32 cfe_find_compressed_code(u32 code)
{
	const struct cfe_fmt *fmt = find_format_by_code(code);
	u32 remap_fourcc;

	if (!fmt)
		return 0;

	remap_fourcc = fmt->remap[CFE_REMAP_COMPRESSED];
	if (!remap_fourcc)
		return 0;

	fmt = find_format_by_pix(remap_fourcc);

	return fmt ? fmt->code : 0;
}
480
/*
 * Clamp the requested image size and compute bytesperline and sizeimage.
 *
 * A user-supplied bytesperline larger than the packed minimum is honoured
 * (after alignment to BPL_ALIGNMENT) to allow padded strides; anything
 * smaller or out of range falls back to the minimum aligned stride.
 */
static void cfe_calc_vid_format_size_bpl(struct cfe_device *cfe,
					 const struct cfe_fmt *fmt,
					 struct v4l2_format *f)
{
	unsigned int min_bytesperline;

	v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, MAX_WIDTH, 2,
			      &f->fmt.pix.height, MIN_HEIGHT, MAX_HEIGHT, 0, 0);

	/* Packed line width in bytes, aligned up to the DMA requirement. */
	min_bytesperline =
		ALIGN((f->fmt.pix.width * fmt->depth) >> 3, BPL_ALIGNMENT);

	if (f->fmt.pix.bytesperline > min_bytesperline &&
	    f->fmt.pix.bytesperline <= MAX_BYTESPERLINE)
		f->fmt.pix.bytesperline =
			ALIGN(f->fmt.pix.bytesperline, BPL_ALIGNMENT);
	else
		f->fmt.pix.bytesperline = min_bytesperline;

	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;

	cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u img_size:%u\n", __func__,
		&f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height,
		f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
}
506
cfe_calc_meta_format_size_bpl(struct cfe_device * cfe,const struct cfe_fmt * fmt,struct v4l2_format * f)507 static void cfe_calc_meta_format_size_bpl(struct cfe_device *cfe,
508 const struct cfe_fmt *fmt,
509 struct v4l2_format *f)
510 {
511 v4l_bound_align_image(&f->fmt.meta.width, MIN_META_WIDTH, MAX_WIDTH, 2,
512 &f->fmt.meta.height, MIN_META_HEIGHT, MAX_HEIGHT,
513 0, 0);
514
515 f->fmt.meta.bytesperline = (f->fmt.meta.width * fmt->depth) >> 3;
516 f->fmt.meta.buffersize = f->fmt.meta.height * f->fmt.pix.bytesperline;
517
518 cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u buf_size:%u\n", __func__,
519 &f->fmt.meta.dataformat, f->fmt.meta.width, f->fmt.meta.height,
520 f->fmt.meta.bytesperline, f->fmt.meta.buffersize);
521 }
522
/*
 * Program the next queued buffer into each streaming CSI2 channel.
 *
 * Called with cfe->state_lock held (via cfe_prepare_next_job()). Relies on
 * cfe->job_ready having been checked beforehand, so each streaming node's
 * dma_queue is non-empty when list_first_entry() is used here.
 */
static void cfe_schedule_next_csi2_job(struct cfe_device *cfe)
{
	struct cfe_buffer *buf;
	dma_addr_t addr;

	for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) {
		struct cfe_node *node = &cfe->node[i];
		unsigned int stride, size;

		if (!check_state(cfe, NODE_STREAMING, i))
			continue;

		buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
				       list);
		node->next_frm = buf;
		list_del(&buf->list);

		trace_cfe_csi2_schedule(node->id, &buf->vb.vb2_buf);

		if (is_meta_node(node)) {
			size = node->meta_fmt.fmt.meta.buffersize;
			/* We use CSI2_CH_CTRL_PACK_BYTES, so stride == 0 */
			stride = 0;
		} else {
			size = node->vid_fmt.fmt.pix.sizeimage;
			stride = node->vid_fmt.fmt.pix.bytesperline;
		}

		addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		csi2_set_buffer(&cfe->csi2, node->id, addr, stride, size);
	}
}
555
/*
 * Dequeue the next buffer for each streaming FE node and submit a job to the
 * front end together with the FE_CONFIG buffer's configuration.
 *
 * Called with cfe->state_lock held (via cfe_prepare_next_job()). Assumes the
 * FE_CONFIG node is streaming so its next_frm is valid when dereferenced
 * below — TODO confirm this is guaranteed by the streaming preconditions.
 */
static void cfe_schedule_next_pisp_job(struct cfe_device *cfe)
{
	struct vb2_buffer *vb2_bufs[FE_NUM_PADS] = { 0 };
	struct cfe_config_buffer *config_buf;
	struct cfe_buffer *buf;

	for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];

		if (!check_state(cfe, NODE_STREAMING, i))
			continue;

		buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
				       list);

		trace_cfe_fe_schedule(node->id, &buf->vb.vb2_buf);

		node->next_frm = buf;
		vb2_bufs[node_desc[i].link_pad] = &buf->vb.vb2_buf;
		list_del(&buf->list);
	}

	config_buf = to_cfe_config_buffer(cfe->node[FE_CONFIG].next_frm);
	pisp_fe_submit_job(&cfe->fe, vb2_bufs, &config_buf->config);
}
581
cfe_check_job_ready(struct cfe_device * cfe)582 static bool cfe_check_job_ready(struct cfe_device *cfe)
583 {
584 for (unsigned int i = 0; i < NUM_NODES; i++) {
585 struct cfe_node *node = &cfe->node[i];
586
587 if (!check_state(cfe, NODE_ENABLED, i))
588 continue;
589
590 if (list_empty(&node->dma_queue))
591 return false;
592 }
593
594 return true;
595 }
596
/*
 * Queue the next capture job: program the CSI2 channels and, when the FE is
 * in use, submit the next FE job. Called with cfe->state_lock held.
 */
static void cfe_prepare_next_job(struct cfe_device *cfe)
{
	trace_cfe_prepare_next_job(is_fe_enabled(cfe));

	cfe->job_queued = true;
	cfe_schedule_next_csi2_job(cfe);
	if (is_fe_enabled(cfe))
		cfe_schedule_next_pisp_job(cfe);

	/* Flag if another job is ready after this. */
	cfe->job_ready = cfe_check_job_ready(cfe);
}
609
/*
 * Complete the node's current buffer with the given vb2 state. The sequence
 * number is fs_count - 1 since fs_count was already incremented at
 * frame start.
 */
static void cfe_process_buffer_complete(struct cfe_node *node,
					enum vb2_buffer_state state)
{
	trace_cfe_buffer_complete(node->id, &node->cur_frm->vb);

	node->cur_frm->vb.sequence = node->fs_count - 1;
	vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
}
618
/* Queue a V4L2 frame-sync event for the current frame on this node. */
static void cfe_queue_event_sof(struct cfe_node *node)
{
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = node->fs_count - 1,
	};

	v4l2_event_queue(&node->video_dev, &event);
}
628
/*
 * Frame-start handler for one node. Called from cfe_isr() with
 * cfe->state_lock held. Rotates next_frm into cur_frm, advances the
 * frame-start counter, synchronizes timestamps across nodes and updates the
 * FS/FE state bits.
 */
static void cfe_sof_isr(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;
	bool matching_fs = true;

	trace_cfe_frame_start(node->id, node->fs_count);

	/*
	 * If the sensor is producing unexpected frame event ordering over a
	 * sustained period of time, guard against the possibility of coming
	 * here and orphaning the cur_frm if it's not been dequeued already.
	 * Unfortunately, there is not enough hardware state to tell if this
	 * may have occurred.
	 */
	if (WARN(node->cur_frm, "%s: [%s] Orphaned frame at seq %u\n",
		 __func__, node_desc[node->id].name, node->fs_count))
		cfe_process_buffer_complete(node, VB2_BUF_STATE_ERROR);

	node->cur_frm = node->next_frm;
	node->next_frm = NULL;
	node->fs_count++;

	node->ts = ktime_get_ns();
	for (unsigned int i = 0; i < NUM_NODES; i++) {
		if (!check_state(cfe, NODE_STREAMING, i) || i == node->id)
			continue;
		/*
		 * This checks if any other node has seen a FS. If yes, use the
		 * same timestamp, eventually across all node buffers.
		 */
		if (cfe->node[i].fs_count >= node->fs_count)
			node->ts = cfe->node[i].ts;
		/*
		 * This checks if all other node have seen a matching FS. If
		 * yes, we can flag another job to be queued.
		 */
		if (matching_fs && cfe->node[i].fs_count != node->fs_count)
			matching_fs = false;
	}

	if (matching_fs)
		cfe->job_queued = false;

	if (node->cur_frm)
		node->cur_frm->vb.vb2_buf.timestamp = node->ts;

	set_state(cfe, FS_INT, node->id);
	clear_state(cfe, FE_INT, node->id);

	if (is_image_output_node(node))
		cfe_queue_event_sof(node);
}
681
/*
 * Frame-end handler for one node. Called from cfe_isr() with
 * cfe->state_lock held. Completes the current buffer and updates the FS/FE
 * state bits.
 */
static void cfe_eof_isr(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;

	trace_cfe_frame_end(node->id, node->fs_count - 1);

	if (node->cur_frm)
		cfe_process_buffer_complete(node, VB2_BUF_STATE_DONE);

	node->cur_frm = NULL;
	set_state(cfe, FE_INT, node->id);
	clear_state(cfe, FS_INT, node->id);
}
695
/*
 * Top-level interrupt handler. Reads the combined interrupt status, lets the
 * CSI2 and FE sub-blocks fill in per-node SOF/EOF flags, then walks every
 * streaming node and runs the frame-start/frame-end handlers in an order
 * that copes with FS and FE firing within the same interrupt.
 */
static irqreturn_t cfe_isr(int irq, void *dev)
{
	struct cfe_device *cfe = dev;
	bool sof[NUM_NODES] = { 0 }, eof[NUM_NODES] = { 0 };
	u32 sts;

	sts = cfg_reg_read(cfe, MIPICFG_INTS);

	if (sts & MIPICFG_INT_CSI_DMA)
		csi2_isr(&cfe->csi2, sof, eof);

	if (sts & MIPICFG_INT_PISP_FE)
		pisp_fe_isr(&cfe->fe, sof + CSI2_NUM_CHANNELS,
			    eof + CSI2_NUM_CHANNELS);

	spin_lock(&cfe->state_lock);

	for (unsigned int i = 0; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];

		/*
		 * The check_state(NODE_STREAMING) is to ensure we do not loop
		 * over the CSI2_CHx nodes when the FE is active since they
		 * generate interrupts even though the node is not streaming.
		 */
		if (!check_state(cfe, NODE_STREAMING, i) || !(sof[i] || eof[i]))
			continue;

		/*
		 * There are 3 cases where we could get FS + FE_ACK at
		 * the same time:
		 * 1) FE of the current frame, and FS of the next frame.
		 * 2) FS + FE of the same frame.
		 * 3) FE of the current frame, and FS + FE of the next
		 *    frame. To handle this, see the sof handler below.
		 *
		 * (1) is handled implicitly by the ordering of the FE and FS
		 * handlers below.
		 */
		if (eof[i]) {
			/*
			 * The condition below tests for (2). Run the FS handler
			 * first before the FE handler, both for the current
			 * frame.
			 */
			if (sof[i] && !check_state(cfe, FS_INT, i)) {
				cfe_sof_isr(node);
				sof[i] = false;
			}

			cfe_eof_isr(node);
		}

		if (sof[i]) {
			/*
			 * The condition below tests for (3). In such cases, we
			 * come in here with FS flag set in the node state from
			 * the previous frame since it only gets cleared in
			 * cfe_eof_isr(). Handle the FE for the previous
			 * frame first before the FS handler for the current
			 * frame.
			 */
			if (check_state(cfe, FS_INT, node->id) &&
			    !check_state(cfe, FE_INT, node->id)) {
				cfe_dbg(cfe, "%s: [%s] Handling missing previous FE interrupt\n",
					__func__, node_desc[node->id].name);
				cfe_eof_isr(node);
			}

			cfe_sof_isr(node);
		}

		if (!cfe->job_queued && cfe->job_ready)
			cfe_prepare_next_job(cfe);
	}

	spin_unlock(&cfe->state_lock);

	return IRQ_HANDLED;
}
776
777 /*
778 * Stream helpers
779 */
780
/*
 * Fallback for sources that do not implement get_frame_desc: assume virtual
 * channel 0 and derive the CSI-2 datatype from the format configured on the
 * CSI2 sink pad.
 */
static int cfe_get_vc_dt_fallback(struct cfe_device *cfe, u8 *vc, u8 *dt)
{
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *fmt;
	const struct cfe_fmt *cfe_fmt;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	fmt = v4l2_subdev_state_get_format(state, CSI2_PAD_SINK, 0);
	if (!fmt)
		return -EINVAL;

	cfe_fmt = find_format_by_code(fmt->code);
	if (!cfe_fmt)
		return -EINVAL;

	*vc = 0;
	*dt = cfe_fmt->csi_dt;

	return 0;
}
802
/*
 * Resolve the CSI-2 virtual channel and datatype for the given CSI2 channel
 * by matching the routed sink stream against the source subdev's frame
 * descriptor. Falls back to cfe_get_vc_dt_fallback() when the source does
 * not implement get_frame_desc.
 */
static int cfe_get_vc_dt(struct cfe_device *cfe, unsigned int channel, u8 *vc,
			 u8 *dt)
{
	struct v4l2_mbus_frame_desc remote_desc;
	struct v4l2_subdev_state *state;
	u32 sink_stream;
	unsigned int i;
	int ret;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	/* Map the channel's source pad back to the sink stream it routes. */
	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
		CSI2_PAD_FIRST_SOURCE + channel, 0, NULL, &sink_stream);
	if (ret)
		return ret;

	ret = v4l2_subdev_call(cfe->source_sd, pad, get_frame_desc,
			       cfe->source_pad, &remote_desc);
	if (ret == -ENOIOCTLCMD) {
		cfe_dbg(cfe, "source does not support get_frame_desc, use fallback\n");
		return cfe_get_vc_dt_fallback(cfe, vc, dt);
	} else if (ret) {
		cfe_err(cfe, "Failed to get frame descriptor\n");
		return ret;
	}

	if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
		cfe_err(cfe, "Frame descriptor does not describe CSI-2 link");
		return -EINVAL;
	}

	/* Find the frame descriptor entry carrying the routed stream. */
	for (i = 0; i < remote_desc.num_entries; i++) {
		if (remote_desc.entry[i].stream == sink_stream)
			break;
	}

	if (i == remote_desc.num_entries) {
		cfe_err(cfe, "Stream %u not found in remote frame desc\n",
			sink_stream);
		return -EINVAL;
	}

	*vc = remote_desc.entry[i].bus.csi2.vc;
	*dt = remote_desc.entry[i].bus.csi2.dt;

	return 0;
}
850
/*
 * Start streaming on a node. When this is the last enabled node to start and
 * the FE is in use, also start the FE and its dedicated CSI2 channel. CSI2
 * nodes additionally start their own CSI2 channel. Finally, kick the first
 * job if buffers are already queued on all enabled nodes.
 */
static int cfe_start_channel(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *source_fmt;
	const struct cfe_fmt *fmt;
	unsigned long flags;
	bool start_fe;
	int ret;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	start_fe = is_fe_enabled(cfe) &&
		   test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	if (start_fe) {
		unsigned int width, height;
		u8 vc, dt;

		cfe_dbg(cfe, "%s: %s using csi2 channel %d\n", __func__,
			node_desc[FE_OUT0].name, cfe->fe_csi2_channel);

		ret = cfe_get_vc_dt(cfe, cfe->fe_csi2_channel, &vc, &dt);
		if (ret)
			return ret;

		source_fmt = v4l2_subdev_state_get_format(state,
			node_desc[cfe->fe_csi2_channel].link_pad);
		fmt = find_format_by_code(source_fmt->code);

		width = source_fmt->width;
		height = source_fmt->height;

		/* Must have a valid CSI2 datatype. */
		WARN_ON(!fmt->csi_dt);

		/*
		 * Start the associated CSI2 Channel as well.
		 *
		 * Must write to the ADDR register to latch the ctrl values
		 * even if we are connected to the front end. Once running,
		 * this is handled by the CSI2 AUTO_ARM mode.
		 */
		csi2_start_channel(&cfe->csi2, cfe->fe_csi2_channel,
				   CSI2_MODE_FE_STREAMING,
				   true, false, width, height, vc, dt);
		csi2_set_buffer(&cfe->csi2, cfe->fe_csi2_channel, 0, 0, -1);
		pisp_fe_start(&cfe->fe);
	}

	if (is_csi2_node(node)) {
		unsigned int width = 0, height = 0;
		u8 vc, dt;

		ret = cfe_get_vc_dt(cfe, node->id, &vc, &dt);
		if (ret) {
			/* Unwind the FE start performed above. */
			if (start_fe) {
				csi2_stop_channel(&cfe->csi2,
						  cfe->fe_csi2_channel);
				pisp_fe_stop(&cfe->fe);
			}

			return ret;
		}

		u32 mode = CSI2_MODE_NORMAL;

		source_fmt = v4l2_subdev_state_get_format(state,
			node_desc[node->id].link_pad);
		fmt = find_format_by_code(source_fmt->code);

		/* Must have a valid CSI2 datatype. */
		WARN_ON(!fmt->csi_dt);

		if (is_image_output_node(node)) {
			u32 pixfmt;

			width = source_fmt->width;
			height = source_fmt->height;

			pixfmt = node->vid_fmt.fmt.pix.pixelformat;

			/* Pick remap/compressed mode if the fourcc asks for it. */
			if (pixfmt == fmt->remap[CFE_REMAP_16BIT]) {
				mode = CSI2_MODE_REMAP;
			} else if (pixfmt == fmt->remap[CFE_REMAP_COMPRESSED]) {
				mode = CSI2_MODE_COMPRESSED;
				csi2_set_compression(&cfe->csi2, node->id,
						     CSI2_COMPRESSION_DELTA, 0,
						     0);
			}
		}
		/* Unconditionally start this CSI2 channel. */
		csi2_start_channel(&cfe->csi2, node->id,
				   mode,
				   /* Auto arm */
				   false,
				   /* Pack bytes */
				   is_meta_node(node) ? true : false,
				   width, height, vc, dt);
	}

	spin_lock_irqsave(&cfe->state_lock, flags);
	if (cfe->job_ready && test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING))
		cfe_prepare_next_job(cfe);
	spin_unlock_irqrestore(&cfe->state_lock, flags);

	return 0;
}
961
/*
 * Stop streaming on a node. When @fe_stop is set, also stop the FE and the
 * CSI2 channel feeding it.
 */
static void cfe_stop_channel(struct cfe_node *node, bool fe_stop)
{
	struct cfe_device *cfe = node->cfe;

	cfe_dbg(cfe, "%s: [%s] fe_stop %u\n", __func__,
		node_desc[node->id].name, fe_stop);

	if (fe_stop) {
		/* Stop the channel feeding the FE first, then the FE itself. */
		csi2_stop_channel(&cfe->csi2, cfe->fe_csi2_channel);
		pisp_fe_stop(&cfe->fe);
	}

	if (!is_csi2_node(node))
		return;

	csi2_stop_channel(&cfe->csi2, node->id);
}
977
/*
 * Return every buffer held by a node — queued buffers, the current frame and
 * the next frame — to vb2 with the given state. Takes cfe->state_lock.
 */
static void cfe_return_buffers(struct cfe_node *node,
			       enum vb2_buffer_state state)
{
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf, *tmp;
	unsigned long flags;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	spin_lock_irqsave(&cfe->state_lock, flags);
	list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) {
		list_del(&buf->list);
		trace_cfe_return_buffer(node->id, buf->vb.vb2_buf.index, 2);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}

	if (node->cur_frm) {
		trace_cfe_return_buffer(node->id,
					node->cur_frm->vb.vb2_buf.index, 0);
		vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
	}
	/* Avoid double-completion when cur_frm and next_frm are the same. */
	if (node->next_frm && node->cur_frm != node->next_frm) {
		trace_cfe_return_buffer(node->id,
					node->next_frm->vb.vb2_buf.index, 1);
		vb2_buffer_done(&node->next_frm->vb.vb2_buf, state);
	}

	node->cur_frm = NULL;
	node->next_frm = NULL;
	spin_unlock_irqrestore(&cfe->state_lock, flags);
}
1009
1010 /*
1011 * vb2 ops
1012 */
1013
/*
 * vb2 queue_setup: report a single plane sized for the node's current image
 * or metadata format. A caller-provided size is accepted if large enough.
 */
static int cfe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			   unsigned int *nplanes, unsigned int sizes[],
			   struct device *alloc_devs[])
{
	struct cfe_node *node = vb2_get_drv_priv(vq);
	struct cfe_device *cfe = node->cfe;
	unsigned int size = is_image_node(node) ?
				    node->vid_fmt.fmt.pix.sizeimage :
				    node->meta_fmt.fmt.meta.buffersize;

	cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
		node->buffer_queue.type);

	if (*nplanes) {
		if (sizes[0] < size) {
			cfe_err(cfe, "sizes[0] %i < size %u\n", sizes[0], size);
			return -EINVAL;
		}
		size = sizes[0];
	}

	*nplanes = 1;
	sizes[0] = size;

	return 0;
}
1040
/*
 * vb2 buf_prepare: verify the plane is large enough for the current format
 * and set the payload. For FE_CONFIG buffers, copy the userspace FE
 * configuration into the kernel-side shadow and validate it.
 */
static int cfe_buffer_prepare(struct vb2_buffer *vb)
{
	struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf = to_cfe_buffer(vb);
	unsigned long size;

	trace_cfe_buffer_prepare(node->id, vb);

	size = is_image_node(node) ? node->vid_fmt.fmt.pix.sizeimage :
				     node->meta_fmt.fmt.meta.buffersize;
	if (vb2_plane_size(vb, 0) < size) {
		cfe_err(cfe, "data will not fit into plane (%lu < %lu)\n",
			vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	if (node->id == FE_CONFIG) {
		struct cfe_config_buffer *b = to_cfe_config_buffer(buf);
		void *addr = vb2_plane_vaddr(vb, 0);

		memcpy(&b->config, addr, sizeof(struct pisp_fe_config));
		return pisp_fe_validate_config(&cfe->fe, &b->config,
					       &cfe->node[FE_OUT0].vid_fmt,
					       &cfe->node[FE_OUT1].vid_fmt);
	}

	return 0;
}
1072
/*
 * vb2 buf_queue: add the buffer to the node's DMA queue and, if possible,
 * schedule the next hardware job. Runs under the device state lock.
 */
static void cfe_buffer_queue(struct vb2_buffer *vb)
{
	struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf = to_cfe_buffer(vb);
	unsigned long flags;
	bool schedule_now;

	spin_lock_irqsave(&cfe->state_lock, flags);

	list_add_tail(&buf->list, &node->dma_queue);

	/* Re-evaluate job readiness now that a new buffer is available. */
	if (!cfe->job_ready)
		cfe->job_ready = cfe_check_job_ready(cfe);

	/* Only schedule if no job is queued and every node is streaming. */
	schedule_now = !cfe->job_queued && cfe->job_ready &&
		       test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	trace_cfe_buffer_queue(node->id, vb, schedule_now);

	if (schedule_now)
		cfe_prepare_next_job(cfe);

	spin_unlock_irqrestore(&cfe->state_lock, flags);
}
1098
/*
 * Return the CSI-2 link frequency (Hz) reported by the source subdev, or a
 * negative error code. The caller must hold the csi2 subdev active-state
 * lock (this uses v4l2_subdev_get_locked_active_state()).
 */
static s64 cfe_get_source_link_freq(struct cfe_device *cfe)
{
	struct media_pad *src_pad =
		&cfe->source_sd->entity.pads[cfe->source_pad];
	struct v4l2_subdev_state *state;
	s64 link_freq;
	u32 bpp;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	/*
	 * v4l2_get_link_freq() uses V4L2_CID_LINK_FREQ first, and falls back
	 * to V4L2_CID_PIXEL_RATE if V4L2_CID_LINK_FREQ is not available.
	 *
	 * With multistream input there is no single pixel rate, and thus we
	 * cannot use V4L2_CID_PIXEL_RATE, so we pass 0 as the bpp which
	 * causes v4l2_get_link_freq() to return an error if it falls back to
	 * V4L2_CID_PIXEL_RATE.
	 */

	if (state->routing.num_routes == 1) {
		struct v4l2_subdev_route *route = &state->routing.routes[0];
		struct v4l2_mbus_framefmt *source_fmt;
		const struct cfe_fmt *fmt;

		/* Single stream: bits-per-pixel comes from the sink format. */
		source_fmt = v4l2_subdev_state_get_format(state,
							  route->sink_pad,
							  route->sink_stream);

		fmt = find_format_by_code(source_fmt->code);
		if (!fmt)
			return -EINVAL;

		bpp = fmt->depth;
	} else {
		bpp = 0;
	}

	link_freq = v4l2_get_link_freq(src_pad, bpp,
				       2 * cfe->csi2.dphy.active_lanes);
	if (link_freq < 0)
		cfe_err(cfe, "failed to get link freq for subdev '%s'\n",
			cfe->source_sd->name);

	return link_freq;
}
1145
cfe_start_streaming(struct vb2_queue * vq,unsigned int count)1146 static int cfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1147 {
1148 struct v4l2_mbus_config mbus_config = { 0 };
1149 struct cfe_node *node = vb2_get_drv_priv(vq);
1150 struct cfe_device *cfe = node->cfe;
1151 struct v4l2_subdev_state *state;
1152 struct v4l2_subdev_route *route;
1153 s64 link_freq;
1154 int ret;
1155
1156 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1157
1158 if (!check_state(cfe, NODE_ENABLED, node->id)) {
1159 cfe_err(cfe, "%s node link is not enabled.\n",
1160 node_desc[node->id].name);
1161 ret = -EINVAL;
1162 goto err_streaming;
1163 }
1164
1165 ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
1166 if (ret < 0) {
1167 cfe_err(cfe, "pm_runtime_resume_and_get failed\n");
1168 goto err_streaming;
1169 }
1170
1171 /* When using the Frontend, we must enable the FE_CONFIG node. */
1172 if (is_fe_enabled(cfe) &&
1173 !check_state(cfe, NODE_ENABLED, cfe->node[FE_CONFIG].id)) {
1174 cfe_err(cfe, "FE enabled, but FE_CONFIG node is not\n");
1175 ret = -EINVAL;
1176 goto err_pm_put;
1177 }
1178
1179 ret = media_pipeline_start(&node->pad, &cfe->pipe);
1180 if (ret < 0) {
1181 cfe_err(cfe, "Failed to start media pipeline: %d\n", ret);
1182 goto err_pm_put;
1183 }
1184
1185 state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);
1186
1187 clear_state(cfe, FS_INT | FE_INT, node->id);
1188 set_state(cfe, NODE_STREAMING, node->id);
1189 node->fs_count = 0;
1190
1191 ret = cfe_start_channel(node);
1192 if (ret)
1193 goto err_unlock_state;
1194
1195 if (!test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) {
1196 cfe_dbg(cfe, "Streaming on hold, as all nodes are not set to streaming yet\n");
1197 v4l2_subdev_unlock_state(state);
1198 return 0;
1199 }
1200
1201 cfg_reg_write(cfe, MIPICFG_CFG, MIPICFG_CFG_SEL_CSI);
1202 cfg_reg_write(cfe, MIPICFG_INTE,
1203 MIPICFG_INT_CSI_DMA | MIPICFG_INT_PISP_FE);
1204
1205 ret = v4l2_subdev_call(cfe->source_sd, pad, get_mbus_config, 0,
1206 &mbus_config);
1207 if (ret < 0 && ret != -ENOIOCTLCMD) {
1208 cfe_err(cfe, "g_mbus_config failed\n");
1209 goto err_clear_inte;
1210 }
1211
1212 cfe->csi2.dphy.active_lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
1213 if (!cfe->csi2.dphy.active_lanes)
1214 cfe->csi2.dphy.active_lanes = cfe->csi2.dphy.max_lanes;
1215 if (cfe->csi2.dphy.active_lanes > cfe->csi2.dphy.max_lanes) {
1216 cfe_err(cfe, "Device has requested %u data lanes, which is >%u configured in DT\n",
1217 cfe->csi2.dphy.active_lanes, cfe->csi2.dphy.max_lanes);
1218 ret = -EINVAL;
1219 goto err_clear_inte;
1220 }
1221
1222 link_freq = cfe_get_source_link_freq(cfe);
1223 if (link_freq < 0)
1224 goto err_clear_inte;
1225
1226 cfe->csi2.dphy.dphy_rate = div_s64(link_freq * 2, 1000000);
1227 csi2_open_rx(&cfe->csi2);
1228
1229 cfe->streams_mask = 0;
1230
1231 for_each_active_route(&state->routing, route)
1232 cfe->streams_mask |= BIT_ULL(route->sink_stream);
1233
1234 ret = v4l2_subdev_enable_streams(cfe->source_sd, cfe->source_pad,
1235 cfe->streams_mask);
1236 if (ret) {
1237 cfe_err(cfe, "stream on failed in subdev\n");
1238 goto err_disable_cfe;
1239 }
1240
1241 cfe_dbg(cfe, "Streaming enabled\n");
1242
1243 v4l2_subdev_unlock_state(state);
1244
1245 return 0;
1246
1247 err_disable_cfe:
1248 csi2_close_rx(&cfe->csi2);
1249 err_clear_inte:
1250 cfg_reg_write(cfe, MIPICFG_INTE, 0);
1251
1252 cfe_stop_channel(node,
1253 is_fe_enabled(cfe) && test_all_nodes(cfe, NODE_ENABLED,
1254 NODE_STREAMING));
1255 err_unlock_state:
1256 v4l2_subdev_unlock_state(state);
1257 media_pipeline_stop(&node->pad);
1258 err_pm_put:
1259 pm_runtime_put(&cfe->pdev->dev);
1260 err_streaming:
1261 cfe_return_buffers(node, VB2_BUF_STATE_QUEUED);
1262 clear_state(cfe, NODE_STREAMING, node->id);
1263
1264 return ret;
1265 }
1266
/*
 * vb2 stop_streaming: stop this node's channel and, once the last
 * streaming node stops, disable the source streams, close the CSI-2
 * receiver and mask the wrapper interrupts.
 */
static void cfe_stop_streaming(struct vb2_queue *vq)
{
	struct cfe_node *node = vb2_get_drv_priv(vq);
	struct cfe_device *cfe = node->cfe;
	unsigned long flags;
	bool fe_stop;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	spin_lock_irqsave(&cfe->state_lock, flags);
	/*
	 * The FE is stopped by the first node to stop, i.e. while all nodes
	 * are still marked as streaming.
	 */
	fe_stop = is_fe_enabled(cfe) &&
		  test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	cfe->job_ready = false;
	clear_state(cfe, NODE_STREAMING, node->id);
	spin_unlock_irqrestore(&cfe->state_lock, flags);

	cfe_stop_channel(node, fe_stop);

	if (!test_any_node(cfe, NODE_STREAMING)) {
		struct v4l2_subdev_state *state;
		int ret;

		state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);

		/* This was the last streaming node: stop the source. */
		ret = v4l2_subdev_disable_streams(cfe->source_sd,
						  cfe->source_pad,
						  cfe->streams_mask);
		if (ret)
			cfe_err(cfe, "stream disable failed in subdev\n");

		v4l2_subdev_unlock_state(state);

		csi2_close_rx(&cfe->csi2);

		/* Mask all wrapper interrupts. */
		cfg_reg_write(cfe, MIPICFG_INTE, 0);

		cfe_dbg(cfe, "%s: Streaming disabled\n", __func__);
	}

	media_pipeline_stop(&node->pad);

	/* Clear all queued buffers for the node */
	cfe_return_buffers(node, VB2_BUF_STATE_ERROR);

	pm_runtime_put(&cfe->pdev->dev);
}
1314
/* vb2 queue operations, shared by all CFE video nodes. */
static const struct vb2_ops cfe_video_qops = {
	.queue_setup = cfe_queue_setup,
	.buf_prepare = cfe_buffer_prepare,
	.buf_queue = cfe_buffer_queue,
	.start_streaming = cfe_start_streaming,
	.stop_streaming = cfe_stop_streaming,
};
1322
1323 /*
1324 * v4l2 ioctl ops
1325 */
1326
/*
 * VIDIOC_QUERYCAP: identical for every node. 'capabilities' advertises the
 * union of what all nodes offer (video capture, meta capture, meta output).
 */
static int cfe_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	strscpy(cap->driver, CFE_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, CFE_MODULE_NAME, sizeof(cap->card));

	cap->capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE |
			     V4L2_CAP_META_OUTPUT;

	return 0;
}
1338
cfe_enum_fmt_vid_cap(struct file * file,void * priv,struct v4l2_fmtdesc * f)1339 static int cfe_enum_fmt_vid_cap(struct file *file, void *priv,
1340 struct v4l2_fmtdesc *f)
1341 {
1342 struct cfe_node *node = video_drvdata(file);
1343 struct cfe_device *cfe = node->cfe;
1344 unsigned int i, j;
1345
1346 if (!node_supports_image_output(node))
1347 return -EINVAL;
1348
1349 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1350
1351 for (i = 0, j = 0; i < ARRAY_SIZE(formats); i++) {
1352 if (f->mbus_code && formats[i].code != f->mbus_code)
1353 continue;
1354
1355 if (formats[i].flags & CFE_FORMAT_FLAG_META_OUT ||
1356 formats[i].flags & CFE_FORMAT_FLAG_META_CAP)
1357 continue;
1358
1359 if (is_fe_node(node) &&
1360 !(formats[i].flags & CFE_FORMAT_FLAG_FE_OUT))
1361 continue;
1362
1363 if (j == f->index) {
1364 f->pixelformat = formats[i].fourcc;
1365 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1366 return 0;
1367 }
1368 j++;
1369 }
1370
1371 return -EINVAL;
1372 }
1373
cfe_g_fmt(struct file * file,void * priv,struct v4l2_format * f)1374 static int cfe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1375 {
1376 struct cfe_node *node = video_drvdata(file);
1377
1378 if (!node_supports_image(node))
1379 return -EINVAL;
1380
1381 *f = node->vid_fmt;
1382
1383 return 0;
1384 }
1385
/*
 * Adjust *f to a valid image capture format for this node. Unsupported
 * pixel formats fall back to a default that works for both CSI2 and FE.
 */
static int cfe_validate_fmt_vid_cap(struct cfe_node *node,
				    struct v4l2_format *f)
{
	struct cfe_device *cfe = node->cfe;
	const struct cfe_fmt *fmt;

	cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 pix %p4cc\n", __func__,
		node_desc[node->id].name, f->fmt.pix.width, f->fmt.pix.height,
		&f->fmt.pix.pixelformat);

	if (!node_supports_image_output(node))
		return -EINVAL;

	/*
	 * Default to a format that works for both CSI2 and FE.
	 */
	fmt = find_format_by_pix(f->fmt.pix.pixelformat);
	if (!fmt)
		fmt = find_format_by_code(MEDIA_BUS_FMT_SBGGR10_1X10);

	f->fmt.pix.pixelformat = fmt->fourcc;

	/* FE nodes use the 16-bit remapped variant where one exists. */
	if (is_fe_node(node) && fmt->remap[CFE_REMAP_16BIT]) {
		f->fmt.pix.pixelformat = fmt->remap[CFE_REMAP_16BIT];
		fmt = find_format_by_pix(f->fmt.pix.pixelformat);
	}

	f->fmt.pix.field = V4L2_FIELD_NONE;

	/* Derive the size/bytesperline fields for the chosen format. */
	cfe_calc_vid_format_size_bpl(cfe, fmt, f);

	return 0;
}
1419
cfe_s_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)1420 static int cfe_s_fmt_vid_cap(struct file *file, void *priv,
1421 struct v4l2_format *f)
1422 {
1423 struct cfe_node *node = video_drvdata(file);
1424 struct cfe_device *cfe = node->cfe;
1425 struct vb2_queue *q = &node->buffer_queue;
1426 int ret;
1427
1428 if (vb2_is_busy(q))
1429 return -EBUSY;
1430
1431 ret = cfe_validate_fmt_vid_cap(node, f);
1432 if (ret)
1433 return ret;
1434
1435 node->vid_fmt = *f;
1436
1437 cfe_dbg(cfe, "%s: Set %ux%u, V4L2 pix %p4cc\n", __func__,
1438 node->vid_fmt.fmt.pix.width, node->vid_fmt.fmt.pix.height,
1439 &node->vid_fmt.fmt.pix.pixelformat);
1440
1441 return 0;
1442 }
1443
cfe_try_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)1444 static int cfe_try_fmt_vid_cap(struct file *file, void *priv,
1445 struct v4l2_format *f)
1446 {
1447 struct cfe_node *node = video_drvdata(file);
1448 struct cfe_device *cfe = node->cfe;
1449
1450 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1451
1452 return cfe_validate_fmt_vid_cap(node, f);
1453 }
1454
/* VIDIOC_ENUM_FMT for metadata nodes (both capture and output). */
static int cfe_enum_fmt_meta(struct file *file, void *priv,
			     struct v4l2_fmtdesc *f)
{
	struct cfe_node *node = video_drvdata(file);
	struct cfe_device *cfe = node->cfe;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	if (!node_supports_meta(node))
		return -EINVAL;

	switch (node->id) {
	case CSI2_CH0...CSI2_CH3:
		/* CSI-2 embedded data: three generic line-based formats. */
		f->flags = V4L2_FMT_FLAG_META_LINE_BASED;

		switch (f->index) {
		case 0:
			f->pixelformat = V4L2_META_FMT_GENERIC_8;
			return 0;
		case 1:
			f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_10;
			return 0;
		case 2:
			f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_12;
			return 0;
		default:
			return -EINVAL;
		}
	default:
		break;
	}

	/* FE stats/config nodes each expose exactly one fixed format. */
	if (f->index != 0)
		return -EINVAL;

	switch (node->id) {
	case FE_STATS:
		f->pixelformat = V4L2_META_FMT_RPI_FE_STATS;
		return 0;
	case FE_CONFIG:
		f->pixelformat = V4L2_META_FMT_RPI_FE_CFG;
		return 0;
	default:
		return -EINVAL;
	}
}
1501
/*
 * Adjust *f to a valid metadata format for this node. CSI-2 channels take
 * generic line-based formats; FE stats/config have one fixed format each.
 */
static int cfe_validate_fmt_meta(struct cfe_node *node, struct v4l2_format *f)
{
	struct cfe_device *cfe = node->cfe;
	const struct cfe_fmt *fmt;

	switch (node->id) {
	case CSI2_CH0...CSI2_CH3:
		cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 meta %p4cc\n", __func__,
			node_desc[node->id].name, f->fmt.meta.width,
			f->fmt.meta.height, &f->fmt.meta.dataformat);
		break;
	case FE_STATS:
	case FE_CONFIG:
		cfe_dbg(cfe, "%s: [%s] %u bytes, V4L2 meta %p4cc\n", __func__,
			node_desc[node->id].name, f->fmt.meta.buffersize,
			&f->fmt.meta.dataformat);
		break;
	default:
		return -EINVAL;
	}

	if (!node_supports_meta(node))
		return -EINVAL;

	switch (node->id) {
	case CSI2_CH0...CSI2_CH3:
		/* Fall back to a 10-bit generic format if unsupported. */
		fmt = find_format_by_pix(f->fmt.meta.dataformat);
		if (!fmt || !(fmt->flags & CFE_FORMAT_FLAG_META_CAP))
			fmt = find_format_by_pix(V4L2_META_FMT_GENERIC_CSI2_10);

		f->fmt.meta.dataformat = fmt->fourcc;

		cfe_calc_meta_format_size_bpl(cfe, fmt, f);

		return 0;
	case FE_STATS:
		/* Fixed format sized by the PiSP statistics struct. */
		f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_STATS;
		f->fmt.meta.buffersize = sizeof(struct pisp_statistics);
		return 0;
	case FE_CONFIG:
		/* Fixed format sized by the PiSP FE config struct. */
		f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_CFG;
		f->fmt.meta.buffersize = sizeof(struct pisp_fe_config);
		return 0;
	default:
		return -EINVAL;
	}
}
1549
cfe_g_fmt_meta(struct file * file,void * priv,struct v4l2_format * f)1550 static int cfe_g_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1551 {
1552 struct cfe_node *node = video_drvdata(file);
1553 struct cfe_device *cfe = node->cfe;
1554
1555 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1556
1557 if (!node_supports_meta(node))
1558 return -EINVAL;
1559
1560 *f = node->meta_fmt;
1561
1562 return 0;
1563 }
1564
cfe_s_fmt_meta(struct file * file,void * priv,struct v4l2_format * f)1565 static int cfe_s_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1566 {
1567 struct cfe_node *node = video_drvdata(file);
1568 struct cfe_device *cfe = node->cfe;
1569 struct vb2_queue *q = &node->buffer_queue;
1570 int ret;
1571
1572 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1573
1574 if (vb2_is_busy(q))
1575 return -EBUSY;
1576
1577 if (!node_supports_meta(node))
1578 return -EINVAL;
1579
1580 ret = cfe_validate_fmt_meta(node, f);
1581 if (ret)
1582 return ret;
1583
1584 node->meta_fmt = *f;
1585
1586 cfe_dbg(cfe, "%s: Set %p4cc\n", __func__,
1587 &node->meta_fmt.fmt.meta.dataformat);
1588
1589 return 0;
1590 }
1591
cfe_try_fmt_meta(struct file * file,void * priv,struct v4l2_format * f)1592 static int cfe_try_fmt_meta(struct file *file, void *priv,
1593 struct v4l2_format *f)
1594 {
1595 struct cfe_node *node = video_drvdata(file);
1596 struct cfe_device *cfe = node->cfe;
1597
1598 cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1599 return cfe_validate_fmt_meta(node, f);
1600 }
1601
cfe_enum_framesizes(struct file * file,void * priv,struct v4l2_frmsizeenum * fsize)1602 static int cfe_enum_framesizes(struct file *file, void *priv,
1603 struct v4l2_frmsizeenum *fsize)
1604 {
1605 struct cfe_node *node = video_drvdata(file);
1606 struct cfe_device *cfe = node->cfe;
1607 const struct cfe_fmt *fmt;
1608
1609 cfe_dbg(cfe, "%s [%s]\n", __func__, node_desc[node->id].name);
1610
1611 if (fsize->index > 0)
1612 return -EINVAL;
1613
1614 /* check for valid format */
1615 fmt = find_format_by_pix(fsize->pixel_format);
1616 if (!fmt) {
1617 cfe_dbg(cfe, "Invalid pixel code: %x\n", fsize->pixel_format);
1618 return -EINVAL;
1619 }
1620
1621 /* TODO: Do we have limits on the step_width? */
1622
1623 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1624 fsize->stepwise.min_width = MIN_WIDTH;
1625 fsize->stepwise.max_width = MAX_WIDTH;
1626 fsize->stepwise.step_width = 2;
1627 fsize->stepwise.min_height = MIN_HEIGHT;
1628 fsize->stepwise.max_height = MAX_HEIGHT;
1629 fsize->stepwise.step_height = 1;
1630
1631 return 0;
1632 }
1633
cfe_vb2_ioctl_reqbufs(struct file * file,void * priv,struct v4l2_requestbuffers * p)1634 static int cfe_vb2_ioctl_reqbufs(struct file *file, void *priv,
1635 struct v4l2_requestbuffers *p)
1636 {
1637 struct video_device *vdev = video_devdata(file);
1638 struct cfe_node *node = video_get_drvdata(vdev);
1639 struct cfe_device *cfe = node->cfe;
1640 int ret;
1641
1642 cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1643 p->type);
1644
1645 if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1646 p->type != V4L2_BUF_TYPE_META_CAPTURE &&
1647 p->type != V4L2_BUF_TYPE_META_OUTPUT)
1648 return -EINVAL;
1649
1650 ret = vb2_queue_change_type(vdev->queue, p->type);
1651 if (ret)
1652 return ret;
1653
1654 return vb2_ioctl_reqbufs(file, priv, p);
1655 }
1656
cfe_vb2_ioctl_create_bufs(struct file * file,void * priv,struct v4l2_create_buffers * p)1657 static int cfe_vb2_ioctl_create_bufs(struct file *file, void *priv,
1658 struct v4l2_create_buffers *p)
1659 {
1660 struct video_device *vdev = video_devdata(file);
1661 struct cfe_node *node = video_get_drvdata(vdev);
1662 struct cfe_device *cfe = node->cfe;
1663 int ret;
1664
1665 cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1666 p->format.type);
1667
1668 if (p->format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1669 p->format.type != V4L2_BUF_TYPE_META_CAPTURE &&
1670 p->format.type != V4L2_BUF_TYPE_META_OUTPUT)
1671 return -EINVAL;
1672
1673 ret = vb2_queue_change_type(vdev->queue, p->format.type);
1674 if (ret)
1675 return ret;
1676
1677 return vb2_ioctl_create_bufs(file, priv, p);
1678 }
1679
cfe_subscribe_event(struct v4l2_fh * fh,const struct v4l2_event_subscription * sub)1680 static int cfe_subscribe_event(struct v4l2_fh *fh,
1681 const struct v4l2_event_subscription *sub)
1682 {
1683 struct cfe_node *node = video_get_drvdata(fh->vdev);
1684
1685 switch (sub->type) {
1686 case V4L2_EVENT_FRAME_SYNC:
1687 if (!node_supports_image_output(node))
1688 break;
1689
1690 return v4l2_event_subscribe(fh, sub, 2, NULL);
1691 case V4L2_EVENT_SOURCE_CHANGE:
1692 if (!node_supports_image_output(node) &&
1693 !node_supports_meta_output(node))
1694 break;
1695
1696 return v4l2_event_subscribe(fh, sub, 4, NULL);
1697 }
1698
1699 return v4l2_ctrl_subscribe_event(fh, sub);
1700 }
1701
/*
 * ioctl handlers shared by all CFE video nodes; each handler checks at
 * runtime whether the node supports the requested image/meta operation.
 */
static const struct v4l2_ioctl_ops cfe_ioctl_ops = {
	.vidioc_querycap = cfe_querycap,
	.vidioc_enum_fmt_vid_cap = cfe_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = cfe_g_fmt,
	.vidioc_s_fmt_vid_cap = cfe_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = cfe_try_fmt_vid_cap,

	.vidioc_enum_fmt_meta_cap = cfe_enum_fmt_meta,
	.vidioc_g_fmt_meta_cap = cfe_g_fmt_meta,
	.vidioc_s_fmt_meta_cap = cfe_s_fmt_meta,
	.vidioc_try_fmt_meta_cap = cfe_try_fmt_meta,

	.vidioc_enum_fmt_meta_out = cfe_enum_fmt_meta,
	.vidioc_g_fmt_meta_out = cfe_g_fmt_meta,
	.vidioc_s_fmt_meta_out = cfe_s_fmt_meta,
	.vidioc_try_fmt_meta_out = cfe_try_fmt_meta,

	.vidioc_enum_framesizes = cfe_enum_framesizes,

	.vidioc_reqbufs = cfe_vb2_ioctl_reqbufs,
	.vidioc_create_bufs = cfe_vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,

	.vidioc_subscribe_event = cfe_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1734
cfe_notify(struct v4l2_subdev * sd,unsigned int notification,void * arg)1735 static void cfe_notify(struct v4l2_subdev *sd, unsigned int notification,
1736 void *arg)
1737 {
1738 struct cfe_device *cfe = to_cfe_device(sd->v4l2_dev);
1739
1740 switch (notification) {
1741 case V4L2_DEVICE_NOTIFY_EVENT:
1742 for (unsigned int i = 0; i < NUM_NODES; i++) {
1743 struct cfe_node *node = &cfe->node[i];
1744
1745 if (check_state(cfe, NODE_REGISTERED, i))
1746 continue;
1747
1748 v4l2_event_queue(&node->video_dev, arg);
1749 }
1750 break;
1751 default:
1752 break;
1753 }
1754 }
1755
/* cfe capture driver file operations, shared by all video nodes */
static const struct v4l2_file_operations cfe_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};
1765
/*
 * media link_validate for subdev -> video node links: the remote pad's
 * mbus format must match the node's configured image or metadata format
 * in both size and pixel/data format.
 */
static int cfe_video_link_validate(struct media_link *link)
{
	struct video_device *vd = container_of(link->sink->entity,
					       struct video_device, entity);
	struct cfe_node *node = container_of(vd, struct cfe_node, video_dev);
	struct cfe_device *cfe = node->cfe;
	struct v4l2_mbus_framefmt *source_fmt;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *source_sd;
	int ret = 0;

	cfe_dbg(cfe, "%s: [%s] link \"%s\":%u -> \"%s\":%u\n", __func__,
		node_desc[node->id].name,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	if (!media_entity_remote_source_pad_unique(link->sink->entity)) {
		cfe_err(cfe, "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	state = v4l2_subdev_lock_and_get_active_state(source_sd);

	source_fmt = v4l2_subdev_state_get_format(state, link->source->index);
	if (!source_fmt) {
		ret = -EINVAL;
		goto out;
	}

	if (is_image_output_node(node)) {
		struct v4l2_pix_format *pix_fmt = &node->vid_fmt.fmt.pix;
		const struct cfe_fmt *fmt;

		/* The node's image size must match the source pad exactly. */
		if (source_fmt->width != pix_fmt->width ||
		    source_fmt->height != pix_fmt->height) {
			cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n",
				pix_fmt->width, pix_fmt->height,
				source_fmt->width, source_fmt->height);
			ret = -EINVAL;
			goto out;
		}

		/* Mbus code and fourcc must form a supported pairing. */
		fmt = find_format_by_code_and_fourcc(source_fmt->code,
						     pix_fmt->pixelformat);
		if (!fmt) {
			cfe_err(cfe, "Format mismatch!\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (is_csi2_node(node) && is_meta_output_node(node)) {
		struct v4l2_meta_format *meta_fmt = &node->meta_fmt.fmt.meta;
		const struct cfe_fmt *fmt;

		/* Same size check for line-based CSI-2 metadata. */
		if (source_fmt->width != meta_fmt->width ||
		    source_fmt->height != meta_fmt->height) {
			cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n",
				meta_fmt->width, meta_fmt->height,
				source_fmt->width, source_fmt->height);
			ret = -EINVAL;
			goto out;
		}

		fmt = find_format_by_code_and_fourcc(source_fmt->code,
						     meta_fmt->dataformat);
		if (!fmt) {
			cfe_err(cfe, "Format mismatch!\n");
			ret = -EINVAL;
			goto out;
		}
	}

out:
	v4l2_subdev_unlock_state(state);

	return ret;
}
1844
/* Entity ops for the video nodes: validate links at pipeline start. */
static const struct media_entity_operations cfe_media_entity_ops = {
	.link_validate = cfe_video_link_validate,
};
1848
/*
 * media link_notify: track which video nodes are enabled and which CSI-2
 * channel (if any) feeds the PiSP Frontend, based on link state changes.
 */
static int cfe_video_link_notify(struct media_link *link, u32 flags,
				 unsigned int notification)
{
	struct media_device *mdev = link->graph_obj.mdev;
	struct cfe_device *cfe = container_of(mdev, struct cfe_device, mdev);
	struct media_entity *fe = &cfe->fe.sd.entity;
	struct media_entity *csi2 = &cfe->csi2.sd.entity;
	unsigned long lock_flags;

	if (notification != MEDIA_DEV_NOTIFY_POST_LINK_CH)
		return 0;

	cfe_dbg(cfe, "%s: %s[%u] -> %s[%u] 0x%x", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index, flags);

	spin_lock_irqsave(&cfe->state_lock, lock_flags);

	/* Mirror the link's enabled flag into the node's ENABLED state. */
	for (unsigned int i = 0; i < NUM_NODES; i++) {
		if (link->sink->entity != &cfe->node[i].video_dev.entity &&
		    link->source->entity != &cfe->node[i].video_dev.entity)
			continue;

		if (link->flags & MEDIA_LNK_FL_ENABLED)
			set_state(cfe, NODE_ENABLED, i);
		else
			clear_state(cfe, NODE_ENABLED, i);

		break;
	}

	spin_unlock_irqrestore(&cfe->state_lock, lock_flags);

	/* Only CSI2 -> FE sink pad 0 links affect the FE channel choice. */
	if (link->source->entity != csi2)
		return 0;
	if (link->sink->entity != fe)
		return 0;
	if (link->sink->index != 0)
		return 0;

	cfe->fe_csi2_channel = -1;
	if (link->flags & MEDIA_LNK_FL_ENABLED) {
		if (link->source->index == node_desc[CSI2_CH0].link_pad)
			cfe->fe_csi2_channel = CSI2_CH0;
		else if (link->source->index == node_desc[CSI2_CH1].link_pad)
			cfe->fe_csi2_channel = CSI2_CH1;
		else if (link->source->index == node_desc[CSI2_CH2].link_pad)
			cfe->fe_csi2_channel = CSI2_CH2;
		else if (link->source->index == node_desc[CSI2_CH3].link_pad)
			cfe->fe_csi2_channel = CSI2_CH3;
	}

	if (is_fe_enabled(cfe))
		cfe_dbg(cfe, "%s: Found CSI2:%d -> FE:0 link\n", __func__,
			cfe->fe_csi2_channel);
	else
		cfe_dbg(cfe, "%s: Unable to find CSI2:x -> FE:0 link\n",
			__func__);

	return 0;
}
1910
/* Media device ops: track link changes to maintain node and FE state. */
static const struct media_device_ops cfe_media_device_ops = {
	.link_notify = cfe_video_link_notify,
};
1914
/* kref release: free the device once the last reference is dropped. */
static void cfe_release(struct kref *kref)
{
	struct cfe_device *cfe = container_of(kref, struct cfe_device, kref);

	media_device_cleanup(&cfe->mdev);

	kfree(cfe);
}
1923
/* Drop a device reference; cfe_release() runs on the last put. */
static void cfe_put(struct cfe_device *cfe)
{
	kref_put(&cfe->kref, cfe_release);
}
1928
/* Take an additional device reference. */
static void cfe_get(struct cfe_device *cfe)
{
	kref_get(&cfe->kref);
}
1933
/* video_device release: drop the reference taken in cfe_register_node(). */
static void cfe_node_release(struct video_device *vdev)
{
	struct cfe_node *node = video_get_drvdata(vdev);

	cfe_put(node->cfe);
}
1940
/*
 * Register one video device node (CSI-2 channel, FE output, FE stats or
 * FE config): set up its default format, vb2 queue, media pad and
 * /dev/video device.
 */
static int cfe_register_node(struct cfe_device *cfe, int id)
{
	struct video_device *vdev;
	const struct cfe_fmt *fmt;
	struct vb2_queue *q;
	struct cfe_node *node = &cfe->node[id];
	int ret;

	node->cfe = cfe;
	node->id = id;

	if (node_supports_image(node)) {
		if (node_supports_image_output(node))
			node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		else
			node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;

		/* Start from the driver-wide default mbus format. */
		fmt = find_format_by_code(cfe_default_format.code);
		if (!fmt) {
			cfe_err(cfe, "Failed to find format code\n");
			return -EINVAL;
		}

		node->vid_fmt.fmt.pix.pixelformat = fmt->fourcc;
		v4l2_fill_pix_format(&node->vid_fmt.fmt.pix,
				     &cfe_default_format);

		ret = cfe_validate_fmt_vid_cap(node, &node->vid_fmt);
		if (ret)
			return ret;
	}

	if (node_supports_meta(node)) {
		if (node_supports_meta_output(node))
			node->meta_fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
		else
			node->meta_fmt.type = V4L2_BUF_TYPE_META_OUTPUT;

		ret = cfe_validate_fmt_meta(node, &node->meta_fmt);
		if (ret)
			return ret;
	}

	mutex_init(&node->lock);

	q = &node->buffer_queue;
	/* Initial queue type; may change later via REQBUFS/CREATE_BUFS. */
	q->type = node_supports_image(node) ? node->vid_fmt.type :
					      node->meta_fmt.type;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = node;
	q->ops = &cfe_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	/* FE_CONFIG buffers carry an embedded copy of the FE config. */
	q->buf_struct_size = id == FE_CONFIG ? sizeof(struct cfe_config_buffer)
					     : sizeof(struct cfe_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &node->lock;
	q->min_queued_buffers = 1;
	q->min_reqbufs_allocation = 3;
	q->dev = &cfe->pdev->dev;

	ret = vb2_queue_init(q);
	if (ret) {
		cfe_err(cfe, "vb2_queue_init() failed\n");
		return ret;
	}

	INIT_LIST_HEAD(&node->dma_queue);

	vdev = &node->video_dev;
	vdev->release = cfe_node_release;
	vdev->fops = &cfe_fops;
	vdev->ioctl_ops = &cfe_ioctl_ops;
	vdev->entity.ops = &cfe_media_entity_ops;
	vdev->v4l2_dev = &cfe->v4l2_dev;
	/* Capture nodes receive data (RX), output nodes send it (TX). */
	vdev->vfl_dir = (node_supports_image_output(node) ||
			 node_supports_meta_output(node)) ?
				VFL_DIR_RX :
				VFL_DIR_TX;
	vdev->queue = q;
	vdev->lock = &node->lock;
	vdev->device_caps = node_desc[id].caps;
	vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;

	/* Define the device names */
	snprintf(vdev->name, sizeof(vdev->name), "%s-%s", CFE_MODULE_NAME,
		 node_desc[id].name);

	video_set_drvdata(vdev, node);
	node->pad.flags = node_desc[id].pad_flags;
	media_entity_pads_init(&vdev->entity, 1, &node->pad);

	/* Frame size/interval enumeration is meaningless for meta nodes. */
	if (!node_supports_image(node)) {
		v4l2_disable_ioctl(&node->video_dev,
				   VIDIOC_ENUM_FRAMEINTERVALS);
		v4l2_disable_ioctl(&node->video_dev, VIDIOC_ENUM_FRAMESIZES);
	}

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		cfe_err(cfe, "Unable to register video device %s\n",
			vdev->name);
		return ret;
	}

	cfe_info(cfe, "Registered [%s] node id %d as /dev/video%u\n",
		 vdev->name, id, vdev->num);

	/*
	 * Acquire a reference to cfe, which will be released when the video
	 * device will be unregistered and userspace will have closed all open
	 * file handles.
	 */
	cfe_get(cfe);
	set_state(cfe, NODE_REGISTERED, id);

	return 0;
}
2058
cfe_unregister_nodes(struct cfe_device * cfe)2059 static void cfe_unregister_nodes(struct cfe_device *cfe)
2060 {
2061 for (unsigned int i = 0; i < NUM_NODES; i++) {
2062 struct cfe_node *node = &cfe->node[i];
2063
2064 if (check_state(cfe, NODE_REGISTERED, i)) {
2065 clear_state(cfe, NODE_REGISTERED, i);
2066 video_unregister_device(&node->video_dev);
2067 }
2068 }
2069 }
2070
/*
 * Create the media graph links: source subdev -> CSI-2 sink, CSI-2
 * channels -> video nodes and FE input, and FE pads <-> FE video nodes.
 */
static int cfe_link_node_pads(struct cfe_device *cfe)
{
	struct media_pad *remote_pad;
	int ret;

	/* Source -> CSI2 */

	ret = v4l2_create_fwnode_links_to_pad(cfe->source_sd,
					      &cfe->csi2.pad[CSI2_PAD_SINK],
					      MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);

	if (ret) {
		cfe_err(cfe, "Failed to create links to the source: %d\n", ret);
		return ret;
	}

	/* Remember which source pad feeds us; used for stream control. */
	remote_pad = media_pad_remote_pad_unique(&cfe->csi2.pad[CSI2_PAD_SINK]);
	if (IS_ERR(remote_pad)) {
		ret = PTR_ERR(remote_pad);
		cfe_err(cfe, "Failed to get unique remote source pad: %d\n",
			ret);
		return ret;
	}

	cfe->source_pad = remote_pad->index;

	for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) {
		struct cfe_node *node = &cfe->node[i];

		if (!check_state(cfe, NODE_REGISTERED, i))
			continue;

		/* CSI2 channel # -> /dev/video# */
		ret = media_create_pad_link(&cfe->csi2.sd.entity,
					    node_desc[i].link_pad,
					    &node->video_dev.entity, 0, 0);
		if (ret)
			return ret;

		if (node_supports_image(node)) {
			/* CSI2 channel # -> FE Input */
			ret = media_create_pad_link(&cfe->csi2.sd.entity,
						    node_desc[i].link_pad,
						    &cfe->fe.sd.entity,
						    FE_STREAM_PAD, 0);
			if (ret)
				return ret;
		}
	}

	/* FE nodes: direction depends on whether the node pad is a sink. */
	for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];
		struct media_entity *src, *dst;
		unsigned int src_pad, dst_pad;

		if (node_desc[i].pad_flags & MEDIA_PAD_FL_SINK) {
			/* FE -> /dev/video# */
			src = &cfe->fe.sd.entity;
			src_pad = node_desc[i].link_pad;
			dst = &node->video_dev.entity;
			dst_pad = 0;
		} else {
			/* /dev/video# -> FE */
			dst = &cfe->fe.sd.entity;
			dst_pad = node_desc[i].link_pad;
			src = &node->video_dev.entity;
			src_pad = 0;
		}

		ret = media_create_pad_link(src, src_pad, dst, dst_pad, 0);
		if (ret)
			return ret;
	}

	return 0;
}
2147
cfe_probe_complete(struct cfe_device * cfe)2148 static int cfe_probe_complete(struct cfe_device *cfe)
2149 {
2150 int ret;
2151
2152 cfe->v4l2_dev.notify = cfe_notify;
2153
2154 for (unsigned int i = 0; i < NUM_NODES; i++) {
2155 ret = cfe_register_node(cfe, i);
2156 if (ret) {
2157 cfe_err(cfe, "Unable to register video node %u.\n", i);
2158 goto unregister;
2159 }
2160 }
2161
2162 ret = cfe_link_node_pads(cfe);
2163 if (ret) {
2164 cfe_err(cfe, "Unable to link node pads.\n");
2165 goto unregister;
2166 }
2167
2168 ret = v4l2_device_register_subdev_nodes(&cfe->v4l2_dev);
2169 if (ret) {
2170 cfe_err(cfe, "Unable to register subdev nodes.\n");
2171 goto unregister;
2172 }
2173
2174 return 0;
2175
2176 unregister:
2177 cfe_unregister_nodes(cfe);
2178 return ret;
2179 }
2180
cfe_async_bound(struct v4l2_async_notifier * notifier,struct v4l2_subdev * subdev,struct v4l2_async_connection * asd)2181 static int cfe_async_bound(struct v4l2_async_notifier *notifier,
2182 struct v4l2_subdev *subdev,
2183 struct v4l2_async_connection *asd)
2184 {
2185 struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
2186
2187 if (cfe->source_sd) {
2188 cfe_err(cfe, "Rejecting subdev %s (Already set!!)",
2189 subdev->name);
2190 return 0;
2191 }
2192
2193 cfe->source_sd = subdev;
2194
2195 cfe_dbg(cfe, "Using source %s for capture\n", subdev->name);
2196
2197 return 0;
2198 }
2199
cfe_async_complete(struct v4l2_async_notifier * notifier)2200 static int cfe_async_complete(struct v4l2_async_notifier *notifier)
2201 {
2202 struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
2203
2204 return cfe_probe_complete(cfe);
2205 }
2206
/* Async notifier callbacks: capture the source subdev, then finish probe. */
static const struct v4l2_async_notifier_operations cfe_async_ops = {
	.bound = cfe_async_bound,
	.complete = cfe_async_complete,
};
2211
cfe_register_async_nf(struct cfe_device * cfe)2212 static int cfe_register_async_nf(struct cfe_device *cfe)
2213 {
2214 struct platform_device *pdev = cfe->pdev;
2215 struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
2216 struct fwnode_handle *local_ep_fwnode;
2217 struct v4l2_async_connection *asd;
2218 int ret;
2219
2220 local_ep_fwnode = fwnode_graph_get_endpoint_by_id(pdev->dev.fwnode, 0,
2221 0, 0);
2222 if (!local_ep_fwnode) {
2223 cfe_err(cfe, "Failed to find local endpoint fwnode\n");
2224 return -ENODEV;
2225 }
2226
2227 /* Parse the local endpoint and validate its configuration. */
2228 ret = v4l2_fwnode_endpoint_parse(local_ep_fwnode, &ep);
2229 if (ret) {
2230 cfe_err(cfe, "Failed to find remote endpoint fwnode\n");
2231 goto err_put_local_fwnode;
2232 }
2233
2234 for (unsigned int lane = 0; lane < ep.bus.mipi_csi2.num_data_lanes;
2235 lane++) {
2236 if (ep.bus.mipi_csi2.data_lanes[lane] != lane + 1) {
2237 cfe_err(cfe, "Data lanes reordering not supported\n");
2238 ret = -EINVAL;
2239 goto err_put_local_fwnode;
2240 }
2241 }
2242
2243 cfe->csi2.dphy.max_lanes = ep.bus.mipi_csi2.num_data_lanes;
2244 cfe->csi2.bus_flags = ep.bus.mipi_csi2.flags;
2245
2246 /* Initialize and register the async notifier. */
2247 v4l2_async_nf_init(&cfe->notifier, &cfe->v4l2_dev);
2248 cfe->notifier.ops = &cfe_async_ops;
2249
2250 asd = v4l2_async_nf_add_fwnode_remote(&cfe->notifier, local_ep_fwnode,
2251 struct v4l2_async_connection);
2252 if (IS_ERR(asd)) {
2253 ret = PTR_ERR(asd);
2254 cfe_err(cfe, "Error adding subdevice: %d\n", ret);
2255 goto err_put_local_fwnode;
2256 }
2257
2258 ret = v4l2_async_nf_register(&cfe->notifier);
2259 if (ret) {
2260 cfe_err(cfe, "Error registering async notifier: %d\n", ret);
2261 goto err_nf_cleanup;
2262 }
2263
2264 fwnode_handle_put(local_ep_fwnode);
2265
2266 return 0;
2267
2268 err_nf_cleanup:
2269 v4l2_async_nf_cleanup(&cfe->notifier);
2270 err_put_local_fwnode:
2271 fwnode_handle_put(local_ep_fwnode);
2272
2273 return ret;
2274 }
2275
cfe_probe(struct platform_device * pdev)2276 static int cfe_probe(struct platform_device *pdev)
2277 {
2278 struct cfe_device *cfe;
2279 char debugfs_name[32];
2280 int ret;
2281
2282 cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
2283 if (!cfe)
2284 return -ENOMEM;
2285
2286 platform_set_drvdata(pdev, cfe);
2287
2288 kref_init(&cfe->kref);
2289 cfe->pdev = pdev;
2290 cfe->fe_csi2_channel = -1;
2291 spin_lock_init(&cfe->state_lock);
2292
2293 cfe->csi2.base = devm_platform_ioremap_resource(pdev, 0);
2294 if (IS_ERR(cfe->csi2.base)) {
2295 dev_err(&pdev->dev, "Failed to get dma io block\n");
2296 ret = PTR_ERR(cfe->csi2.base);
2297 goto err_cfe_put;
2298 }
2299
2300 cfe->csi2.dphy.base = devm_platform_ioremap_resource(pdev, 1);
2301 if (IS_ERR(cfe->csi2.dphy.base)) {
2302 dev_err(&pdev->dev, "Failed to get host io block\n");
2303 ret = PTR_ERR(cfe->csi2.dphy.base);
2304 goto err_cfe_put;
2305 }
2306
2307 cfe->mipi_cfg_base = devm_platform_ioremap_resource(pdev, 2);
2308 if (IS_ERR(cfe->mipi_cfg_base)) {
2309 dev_err(&pdev->dev, "Failed to get mipi cfg io block\n");
2310 ret = PTR_ERR(cfe->mipi_cfg_base);
2311 goto err_cfe_put;
2312 }
2313
2314 cfe->fe.base = devm_platform_ioremap_resource(pdev, 3);
2315 if (IS_ERR(cfe->fe.base)) {
2316 dev_err(&pdev->dev, "Failed to get pisp fe io block\n");
2317 ret = PTR_ERR(cfe->fe.base);
2318 goto err_cfe_put;
2319 }
2320
2321 ret = platform_get_irq(pdev, 0);
2322 if (ret <= 0) {
2323 ret = -EINVAL;
2324 goto err_cfe_put;
2325 }
2326
2327 ret = devm_request_irq(&pdev->dev, ret, cfe_isr, 0, "rp1-cfe", cfe);
2328 if (ret) {
2329 dev_err(&pdev->dev, "Unable to request interrupt\n");
2330 ret = -EINVAL;
2331 goto err_cfe_put;
2332 }
2333
2334 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2335 if (ret) {
2336 dev_err(&pdev->dev, "DMA enable failed\n");
2337 goto err_cfe_put;
2338 }
2339
2340 ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, UINT_MAX);
2341 if (ret)
2342 goto err_cfe_put;
2343
2344 /* TODO: Enable clock only when running. */
2345 cfe->clk = devm_clk_get(&pdev->dev, NULL);
2346 if (IS_ERR(cfe->clk)) {
2347 ret = dev_err_probe(&pdev->dev, PTR_ERR(cfe->clk),
2348 "clock not found\n");
2349 goto err_cfe_put;
2350 }
2351
2352 cfe->mdev.dev = &pdev->dev;
2353 cfe->mdev.ops = &cfe_media_device_ops;
2354 strscpy(cfe->mdev.model, CFE_MODULE_NAME, sizeof(cfe->mdev.model));
2355 strscpy(cfe->mdev.serial, "", sizeof(cfe->mdev.serial));
2356 snprintf(cfe->mdev.bus_info, sizeof(cfe->mdev.bus_info), "platform:%s",
2357 dev_name(&pdev->dev));
2358
2359 media_device_init(&cfe->mdev);
2360
2361 cfe->v4l2_dev.mdev = &cfe->mdev;
2362
2363 ret = v4l2_device_register(&pdev->dev, &cfe->v4l2_dev);
2364 if (ret) {
2365 cfe_err(cfe, "Unable to register v4l2 device.\n");
2366 goto err_cfe_put;
2367 }
2368
2369 snprintf(debugfs_name, sizeof(debugfs_name), "rp1-cfe:%s",
2370 dev_name(&pdev->dev));
2371 cfe->debugfs = debugfs_create_dir(debugfs_name, NULL);
2372 debugfs_create_file("regs", 0440, cfe->debugfs, cfe,
2373 &mipi_cfg_regs_fops);
2374
2375 /* Enable the block power domain */
2376 pm_runtime_enable(&pdev->dev);
2377
2378 ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
2379 if (ret)
2380 goto err_runtime_disable;
2381
2382 cfe->csi2.v4l2_dev = &cfe->v4l2_dev;
2383 ret = csi2_init(&cfe->csi2, cfe->debugfs);
2384 if (ret) {
2385 cfe_err(cfe, "Failed to init csi2 (%d)\n", ret);
2386 goto err_runtime_put;
2387 }
2388
2389 cfe->fe.v4l2_dev = &cfe->v4l2_dev;
2390 ret = pisp_fe_init(&cfe->fe, cfe->debugfs);
2391 if (ret) {
2392 cfe_err(cfe, "Failed to init pisp fe (%d)\n", ret);
2393 goto err_csi2_uninit;
2394 }
2395
2396 cfe->mdev.hw_revision = cfe->fe.hw_revision;
2397 ret = media_device_register(&cfe->mdev);
2398 if (ret < 0) {
2399 cfe_err(cfe, "Unable to register media-controller device.\n");
2400 goto err_pisp_fe_uninit;
2401 }
2402
2403 ret = cfe_register_async_nf(cfe);
2404 if (ret) {
2405 cfe_err(cfe, "Failed to connect subdevs\n");
2406 goto err_media_unregister;
2407 }
2408
2409 pm_runtime_put(&cfe->pdev->dev);
2410
2411 return 0;
2412
2413 err_media_unregister:
2414 media_device_unregister(&cfe->mdev);
2415 err_pisp_fe_uninit:
2416 pisp_fe_uninit(&cfe->fe);
2417 err_csi2_uninit:
2418 csi2_uninit(&cfe->csi2);
2419 err_runtime_put:
2420 pm_runtime_put(&cfe->pdev->dev);
2421 err_runtime_disable:
2422 pm_runtime_disable(&pdev->dev);
2423 debugfs_remove(cfe->debugfs);
2424 v4l2_device_unregister(&cfe->v4l2_dev);
2425 err_cfe_put:
2426 cfe_put(cfe);
2427
2428 return ret;
2429 }
2430
/*
 * cfe_remove() - Tear the device down in roughly the reverse order of
 * probe. The final cfe_put() drops the probe-time reference; the cfe
 * structure is freed only once all open file handles are closed as well.
 */
static void cfe_remove(struct platform_device *pdev)
{
	struct cfe_device *cfe = platform_get_drvdata(pdev);

	debugfs_remove(cfe->debugfs);

	/* Stop async matching before unregistering anything it references. */
	v4l2_async_nf_unregister(&cfe->notifier);
	v4l2_async_nf_cleanup(&cfe->notifier);

	media_device_unregister(&cfe->mdev);
	cfe_unregister_nodes(cfe);

	pisp_fe_uninit(&cfe->fe);
	csi2_uninit(&cfe->csi2);

	pm_runtime_disable(&pdev->dev);

	v4l2_device_unregister(&cfe->v4l2_dev);

	cfe_put(cfe);
}
2452
cfe_runtime_suspend(struct device * dev)2453 static int cfe_runtime_suspend(struct device *dev)
2454 {
2455 struct platform_device *pdev = to_platform_device(dev);
2456 struct cfe_device *cfe = platform_get_drvdata(pdev);
2457
2458 clk_disable_unprepare(cfe->clk);
2459
2460 return 0;
2461 }
2462
cfe_runtime_resume(struct device * dev)2463 static int cfe_runtime_resume(struct device *dev)
2464 {
2465 struct platform_device *pdev = to_platform_device(dev);
2466 struct cfe_device *cfe = platform_get_drvdata(pdev);
2467 int ret;
2468
2469 ret = clk_prepare_enable(cfe->clk);
2470 if (ret) {
2471 dev_err(dev, "Unable to enable clock\n");
2472 return ret;
2473 }
2474
2475 return 0;
2476 }
2477
/*
 * Runtime PM gates the CFE clock; system sleep reuses the runtime PM
 * callbacks through the force_suspend/force_resume helpers.
 */
static const struct dev_pm_ops cfe_pm_ops = {
	SET_RUNTIME_PM_OPS(cfe_runtime_suspend, cfe_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};
2483
/* Device tree match table. */
static const struct of_device_id cfe_of_match[] = {
	{ .compatible = "raspberrypi,rp1-cfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cfe_of_match);
2489
/* Platform driver glue and module boilerplate. */
static struct platform_driver cfe_driver = {
	.probe = cfe_probe,
	.remove = cfe_remove,
	.driver = {
		.name = CFE_MODULE_NAME,
		.of_match_table = cfe_of_match,
		.pm = &cfe_pm_ops,
	},
};

module_platform_driver(cfe_driver);

MODULE_AUTHOR("Naushir Patuck <naush@raspberrypi.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
MODULE_DESCRIPTION("Raspberry Pi RP1 Camera Front End driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CFE_VERSION);