1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Driver for STM32 Digital Camera Memory Interface Pixel Processor
4 *
5 * Copyright (C) STMicroelectronics SA 2023
6 * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
7 * Alain Volmat <alain.volmat@foss.st.com>
8 * for STMicroelectronics.
9 */
10
11 #include <linux/iopoll.h>
12 #include <linux/pm_runtime.h>
13 #include <media/v4l2-ioctl.h>
14 #include <media/v4l2-mc.h>
15 #include <media/videobuf2-core.h>
16 #include <media/videobuf2-dma-contig.h>
17
18 #include "dcmipp-common.h"
19
/*
 * DCMIPP register offsets (relative to the peripheral base) and bit
 * definitions. Only the registers used by the pipe-0 byte capture are
 * listed here.
 */
#define DCMIPP_PRSR 0x1f8
#define DCMIPP_CMIER 0x3f0
#define DCMIPP_CMIER_P0FRAMEIE BIT(9)
#define DCMIPP_CMIER_P0VSYNCIE BIT(10)
#define DCMIPP_CMIER_P0OVRIE BIT(15)
/* All pipe-0 interrupt sources handled by this driver */
#define DCMIPP_CMIER_P0ALL (DCMIPP_CMIER_P0VSYNCIE |\
			    DCMIPP_CMIER_P0FRAMEIE |\
			    DCMIPP_CMIER_P0OVRIE)
#define DCMIPP_CMSR1 0x3f4
#define DCMIPP_CMSR2 0x3f8
#define DCMIPP_CMSR2_P0FRAMEF BIT(9)
#define DCMIPP_CMSR2_P0VSYNCF BIT(10)
#define DCMIPP_CMSR2_P0OVRF BIT(15)
/* Interrupt flag clear register */
#define DCMIPP_CMFCR 0x3fc
#define DCMIPP_P0FSCR 0x404
#define DCMIPP_P0FSCR_PIPEN BIT(31)
#define DCMIPP_P0FCTCR 0x500
#define DCMIPP_P0FCTCR_CPTREQ BIT(3)
/* Captured byte counter, read on FRAMEEND */
#define DCMIPP_P0DCCNTR 0x5b0
#define DCMIPP_P0DCLMTR 0x5b4
#define DCMIPP_P0DCLMTR_ENABLE BIT(31)
#define DCMIPP_P0DCLMTR_LIMIT_MASK GENMASK(23, 0)
/* Pipe-0 memory destination address (shadowed, latched on VSYNC) */
#define DCMIPP_P0PPM0AR1 0x5c4
#define DCMIPP_P0SR 0x5f8
#define DCMIPP_P0SR_CPTACT BIT(23)
45
/* Mapping between a media bus code and the V4L2 pixelformat it produces */
struct dcmipp_bytecap_pix_map {
	unsigned int code;
	u32 pixelformat;
};

#define PIXMAP_MBUS_PFMT(mbus, fmt)			\
	{						\
		.code = MEDIA_BUS_FMT_##mbus,		\
		.pixelformat = V4L2_PIX_FMT_##fmt	\
	}

/*
 * Supported format mappings. Several mbus codes (parallel 2X8 and CSI
 * 1X16 variants) map onto the same pixelformat; entries sharing a
 * pixelformat are kept adjacent so that enum_fmt can skip duplicates by
 * comparing with the previous entry only.
 */
static const struct dcmipp_bytecap_pix_map dcmipp_bytecap_pix_map_list[] = {
	PIXMAP_MBUS_PFMT(RGB565_2X8_LE, RGB565),
	PIXMAP_MBUS_PFMT(RGB565_1X16, RGB565),
	PIXMAP_MBUS_PFMT(RGB888_1X24, RGB24),
	PIXMAP_MBUS_PFMT(YUYV8_2X8, YUYV),
	PIXMAP_MBUS_PFMT(YUYV8_1X16, YUYV),
	PIXMAP_MBUS_PFMT(YVYU8_2X8, YVYU),
	PIXMAP_MBUS_PFMT(YVYU8_1X16, YVYU),
	PIXMAP_MBUS_PFMT(UYVY8_2X8, UYVY),
	PIXMAP_MBUS_PFMT(UYVY8_1X16, UYVY),
	PIXMAP_MBUS_PFMT(VYUY8_2X8, VYUY),
	PIXMAP_MBUS_PFMT(VYUY8_1X16, VYUY),
	PIXMAP_MBUS_PFMT(Y8_1X8, GREY),
	PIXMAP_MBUS_PFMT(Y10_1X10, Y10),
	PIXMAP_MBUS_PFMT(Y12_1X12, Y12),
	PIXMAP_MBUS_PFMT(Y14_1X14, Y14),
	PIXMAP_MBUS_PFMT(SBGGR8_1X8, SBGGR8),
	PIXMAP_MBUS_PFMT(SGBRG8_1X8, SGBRG8),
	PIXMAP_MBUS_PFMT(SGRBG8_1X8, SGRBG8),
	PIXMAP_MBUS_PFMT(SRGGB8_1X8, SRGGB8),
	PIXMAP_MBUS_PFMT(SBGGR10_1X10, SBGGR10),
	PIXMAP_MBUS_PFMT(SGBRG10_1X10, SGBRG10),
	PIXMAP_MBUS_PFMT(SGRBG10_1X10, SGRBG10),
	PIXMAP_MBUS_PFMT(SRGGB10_1X10, SRGGB10),
	PIXMAP_MBUS_PFMT(SBGGR12_1X12, SBGGR12),
	PIXMAP_MBUS_PFMT(SGBRG12_1X12, SGBRG12),
	PIXMAP_MBUS_PFMT(SGRBG12_1X12, SGRBG12),
	PIXMAP_MBUS_PFMT(SRGGB12_1X12, SRGGB12),
	PIXMAP_MBUS_PFMT(SBGGR14_1X14, SBGGR14),
	PIXMAP_MBUS_PFMT(SGBRG14_1X14, SGBRG14),
	PIXMAP_MBUS_PFMT(SGRBG14_1X14, SGRBG14),
	PIXMAP_MBUS_PFMT(SRGGB14_1X14, SRGGB14),
	PIXMAP_MBUS_PFMT(JPEG_1X8, JPEG),
};
91
92 static const struct dcmipp_bytecap_pix_map *
dcmipp_bytecap_pix_map_by_pixelformat(u32 pixelformat)93 dcmipp_bytecap_pix_map_by_pixelformat(u32 pixelformat)
94 {
95 unsigned int i;
96
97 for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
98 if (dcmipp_bytecap_pix_map_list[i].pixelformat == pixelformat)
99 return &dcmipp_bytecap_pix_map_list[i];
100 }
101
102 return NULL;
103 }
104
/* Per-buffer driver state wrapped around the vb2 buffer */
struct dcmipp_buf {
	struct vb2_v4l2_buffer vb;
	/* True once addr/size below have been cached (first buf_prepare) */
	bool prepared;
	/* DMA address of plane 0, written to DCMIPP_P0PPM0AR1 */
	dma_addr_t addr;
	/* Full plane size, used to program the DCMIPP_P0DCLMTR limit */
	size_t size;
	/* Entry in dcmipp_bytecap_device.buffers */
	struct list_head list;
};

/* Capture state, protected by dcmipp_bytecap_device.irqlock */
enum dcmipp_state {
	DCMIPP_STOPPED = 0,
	/* Streaming but capture paused: waiting for a buffer to be queued */
	DCMIPP_WAIT_FOR_BUFFER,
	DCMIPP_RUNNING,
};

struct dcmipp_bytecap_device {
	struct dcmipp_ent_device ved;
	struct video_device vdev;
	struct device *dev;
	/* Current capture format, set via s_fmt, defaults to fmt_default */
	struct v4l2_pix_format format;
	struct vb2_queue queue;
	/* Buffers queued by userspace and not yet returned */
	struct list_head buffers;
	/*
	 * Protects concurrent calls of buf queue / irq handler
	 * and buffer handling related variables / lists
	 */
	spinlock_t irqlock;
	/* mutex used as vdev and queue lock */
	struct mutex lock;
	u32 sequence;
	struct media_pipeline pipe;
	/* Source subdev, cached on first start_streaming (link is IMMUTABLE) */
	struct v4l2_subdev *s_subdev;
	u32 s_subdev_pad_nb;

	enum dcmipp_state state;

	/*
	 * DCMIPP driver is handling 2 buffers
	 * active: buffer into which DCMIPP is currently writing into
	 * next: buffer given to the DCMIPP and which will become
	 * automatically active on next VSYNC
	 */
	struct dcmipp_buf *active, *next;

	void __iomem *regs;

	/* Interrupt status latched by the hard irq handler for the thread */
	u32 cmsr2;

	/* Statistics, reset on start_streaming, reported on stop */
	struct {
		u32 errors;
		u32 limit;
		u32 overrun;
		u32 buffers;
		u32 vsync;
		u32 frame;
		u32 it;
		u32 underrun;
		u32 nactive;
	} count;
};
164
/* Default capture format (RGB565, 2 bytes/pixel), applied at init time */
static const struct v4l2_pix_format fmt_default = {
	.width = DCMIPP_FMT_WIDTH_DEFAULT,
	.height = DCMIPP_FMT_HEIGHT_DEFAULT,
	.pixelformat = V4L2_PIX_FMT_RGB565,
	.field = V4L2_FIELD_NONE,
	.bytesperline = DCMIPP_FMT_WIDTH_DEFAULT * 2,
	.sizeimage = DCMIPP_FMT_WIDTH_DEFAULT * DCMIPP_FMT_HEIGHT_DEFAULT * 2,
	.colorspace = DCMIPP_COLORSPACE_DEFAULT,
	.ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
	.quantization = DCMIPP_QUANTIZATION_DEFAULT,
	.xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
};
177
dcmipp_bytecap_querycap(struct file * file,void * priv,struct v4l2_capability * cap)178 static int dcmipp_bytecap_querycap(struct file *file, void *priv,
179 struct v4l2_capability *cap)
180 {
181 strscpy(cap->driver, DCMIPP_PDEV_NAME, sizeof(cap->driver));
182 strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
183
184 return 0;
185 }
186
dcmipp_bytecap_g_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)187 static int dcmipp_bytecap_g_fmt_vid_cap(struct file *file, void *priv,
188 struct v4l2_format *f)
189 {
190 struct dcmipp_bytecap_device *vcap = video_drvdata(file);
191
192 f->fmt.pix = vcap->format;
193
194 return 0;
195 }
196
dcmipp_bytecap_try_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)197 static int dcmipp_bytecap_try_fmt_vid_cap(struct file *file, void *priv,
198 struct v4l2_format *f)
199 {
200 struct dcmipp_bytecap_device *vcap = video_drvdata(file);
201 struct v4l2_pix_format *format = &f->fmt.pix;
202 const struct dcmipp_bytecap_pix_map *vpix;
203 u32 in_w, in_h;
204
205 /* Don't accept a pixelformat that is not on the table */
206 vpix = dcmipp_bytecap_pix_map_by_pixelformat(format->pixelformat);
207 if (!vpix)
208 format->pixelformat = fmt_default.pixelformat;
209
210 /* Adjust width & height */
211 in_w = format->width;
212 in_h = format->height;
213 v4l_bound_align_image(&format->width, DCMIPP_FRAME_MIN_WIDTH,
214 DCMIPP_FRAME_MAX_WIDTH, 0, &format->height,
215 DCMIPP_FRAME_MIN_HEIGHT, DCMIPP_FRAME_MAX_HEIGHT,
216 0, 0);
217 if (format->width != in_w || format->height != in_h)
218 dev_dbg(vcap->dev, "resolution updated: %dx%d -> %dx%d\n",
219 in_w, in_h, format->width, format->height);
220
221 if (format->pixelformat == V4L2_PIX_FMT_JPEG) {
222 format->bytesperline = format->width;
223 format->sizeimage = format->bytesperline * format->height;
224 } else {
225 v4l2_fill_pixfmt(format, format->pixelformat,
226 format->width, format->height);
227 }
228
229 if (format->field == V4L2_FIELD_ANY)
230 format->field = fmt_default.field;
231
232 dcmipp_colorimetry_clamp(format);
233
234 return 0;
235 }
236
/*
 * VIDIOC_S_FMT handler: validate the format via try_fmt and store it.
 * Rejected while buffers are allocated on the queue.
 */
static int dcmipp_bytecap_s_fmt_vid_cap(struct file *file, void *priv,
					struct v4l2_format *f)
{
	struct dcmipp_bytecap_device *vcap = video_drvdata(file);
	int ret;

	/* Do not change the format while stream is on */
	if (vb2_is_busy(&vcap->queue))
		return -EBUSY;

	ret = dcmipp_bytecap_try_fmt_vid_cap(file, priv, f);
	if (ret)
		return ret;

	/* NOTE(review): 'new' height uses %d while the other fields use %u */
	dev_dbg(vcap->dev, "%s: format update: old:%ux%u (0x%p4cc, %u, %u, %u, %u) new:%ux%d (0x%p4cc, %u, %u, %u, %u)\n",
		vcap->vdev.name,
		/* old */
		vcap->format.width, vcap->format.height,
		&vcap->format.pixelformat, vcap->format.colorspace,
		vcap->format.quantization, vcap->format.xfer_func,
		vcap->format.ycbcr_enc,
		/* new */
		f->fmt.pix.width, f->fmt.pix.height,
		&f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
		f->fmt.pix.quantization, f->fmt.pix.xfer_func,
		f->fmt.pix.ycbcr_enc);

	vcap->format = f->fmt.pix;

	return 0;
}
268
/*
 * VIDIOC_ENUM_FMT handler: enumerate supported pixelformats, optionally
 * restricted to those produced by f->mbus_code.
 */
static int dcmipp_bytecap_enum_fmt_vid_cap(struct file *file, void *priv,
					   struct v4l2_fmtdesc *f)
{
	const struct dcmipp_bytecap_pix_map *vpix;
	unsigned int index = f->index;
	unsigned int i, prev_pixelformat = 0;

	/*
	 * List up all formats (or only ones matching f->mbus_code), taking
	 * care of removing duplicated entries (due to support of both
	 * parallel & csi 16 bits formats
	 */
	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
		vpix = &dcmipp_bytecap_pix_map_list[i];
		/* Skip formats not matching requested mbus code */
		if (f->mbus_code && vpix->code != f->mbus_code)
			continue;

		/*
		 * Skip duplicated pixelformat; relies on duplicates being
		 * adjacent in the table, so comparing with the previous
		 * match is sufficient
		 */
		if (vpix->pixelformat == prev_pixelformat)
			continue;

		prev_pixelformat = vpix->pixelformat;

		/* index reached zero: vpix is the requested entry */
		if (index == 0)
			break;

		index--;
	}

	/* Loop ran off the table: f->index is beyond the last format */
	if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
		return -EINVAL;

	f->pixelformat = vpix->pixelformat;

	return 0;
}
306
dcmipp_bytecap_enum_framesizes(struct file * file,void * fh,struct v4l2_frmsizeenum * fsize)307 static int dcmipp_bytecap_enum_framesizes(struct file *file, void *fh,
308 struct v4l2_frmsizeenum *fsize)
309 {
310 const struct dcmipp_bytecap_pix_map *vpix;
311
312 if (fsize->index)
313 return -EINVAL;
314
315 /* Only accept code in the pix map table */
316 vpix = dcmipp_bytecap_pix_map_by_pixelformat(fsize->pixel_format);
317 if (!vpix)
318 return -EINVAL;
319
320 fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
321 fsize->stepwise.min_width = DCMIPP_FRAME_MIN_WIDTH;
322 fsize->stepwise.max_width = DCMIPP_FRAME_MAX_WIDTH;
323 fsize->stepwise.min_height = DCMIPP_FRAME_MIN_HEIGHT;
324 fsize->stepwise.max_height = DCMIPP_FRAME_MAX_HEIGHT;
325 fsize->stepwise.step_width = 1;
326 fsize->stepwise.step_height = 1;
327
328 return 0;
329 }
330
/* File operations: mostly delegated to the vb2 helpers */
static const struct v4l2_file_operations dcmipp_bytecap_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};

/* Ioctl operations: format handling here, buffer handling via vb2 */
static const struct v4l2_ioctl_ops dcmipp_bytecap_ioctl_ops = {
	.vidioc_querycap = dcmipp_bytecap_querycap,

	.vidioc_g_fmt_vid_cap = dcmipp_bytecap_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = dcmipp_bytecap_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = dcmipp_bytecap_try_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = dcmipp_bytecap_enum_fmt_vid_cap,
	.vidioc_enum_framesizes = dcmipp_bytecap_enum_framesizes,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};
360
/*
 * Program the destination buffer into the hardware and request capture.
 * Called with irqlock held or before interrupts are enabled.
 */
static void dcmipp_start_capture(struct dcmipp_bytecap_device *vcap,
				 struct dcmipp_buf *buf)
{
	/* Set buffer address */
	reg_write(vcap, DCMIPP_P0PPM0AR1, buf->addr);

	/*
	 * Set buffer size limit; size / 4 suggests the limit is expressed
	 * in 32-bit words — assumption, confirm against the reference manual
	 */
	reg_write(vcap, DCMIPP_P0DCLMTR, DCMIPP_P0DCLMTR_ENABLE |
		  ((buf->size / 4) & DCMIPP_P0DCLMTR_LIMIT_MASK));

	/* Capture request */
	reg_set(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
}
374
dcmipp_bytecap_all_buffers_done(struct dcmipp_bytecap_device * vcap,enum vb2_buffer_state state)375 static void dcmipp_bytecap_all_buffers_done(struct dcmipp_bytecap_device *vcap,
376 enum vb2_buffer_state state)
377 {
378 struct dcmipp_buf *buf, *node;
379
380 list_for_each_entry_safe(buf, node, &vcap->buffers, list) {
381 list_del_init(&buf->list);
382 vb2_buffer_done(&buf->vb.vb2_buf, state);
383 }
384 }
385
/*
 * vb2 start_streaming: resume the device, start the media pipeline and
 * the source subdev stream, then program the first buffer and enable
 * pipe-0 capture and its interrupts.
 */
static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
					  unsigned int count)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
	struct media_entity *entity = &vcap->vdev.entity;
	struct dcmipp_buf *buf;
	struct media_pad *pad;
	int ret;

	/* Reset frame sequence counter and per-stream statistics */
	vcap->sequence = 0;
	memset(&vcap->count, 0, sizeof(vcap->count));

	/*
	 * Get source subdev - since link is IMMUTABLE, pointer is cached
	 * within the dcmipp_bytecap_device structure
	 */
	if (!vcap->s_subdev) {
		pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			return -EINVAL;
		vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
		vcap->s_subdev_pad_nb = pad->index;
	}

	ret = pm_runtime_resume_and_get(vcap->dev);
	if (ret < 0) {
		dev_err(vcap->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
			__func__, ret);
		goto err_buffer_done;
	}

	ret = media_pipeline_start(entity->pads, &vcap->pipe);
	if (ret) {
		dev_dbg(vcap->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
			__func__, ret);
		goto err_pm_put;
	}

	/* Start the source subdev stream feeding this capture device */
	ret = v4l2_subdev_enable_streams(vcap->s_subdev,
					 vcap->s_subdev_pad_nb, BIT_ULL(0));
	if (ret)
		goto err_media_pipeline_stop;

	spin_lock_irq(&vcap->irqlock);

	/* Enable pipe at the end of programming */
	reg_set(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);

	/*
	 * vb2 framework guarantee that we have at least 'min_queued_buffers'
	 * buffers in the list at this moment
	 */
	vcap->next = list_first_entry(&vcap->buffers, typeof(*buf), list);
	dev_dbg(vcap->dev, "Start with next [%d] %p phy=%pad\n",
		vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);

	dcmipp_start_capture(vcap, vcap->next);

	/* Enable interruptions */
	reg_set(vcap, DCMIPP_CMIER, DCMIPP_CMIER_P0ALL);

	vcap->state = DCMIPP_RUNNING;

	spin_unlock_irq(&vcap->irqlock);

	return 0;

err_media_pipeline_stop:
	media_pipeline_stop(entity->pads);
err_pm_put:
	pm_runtime_put(vcap->dev);
err_buffer_done:
	spin_lock_irq(&vcap->irqlock);
	/*
	 * Return all buffers to vb2 in QUEUED state.
	 * This will give ownership back to userspace
	 */
	dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_QUEUED);
	vcap->active = NULL;
	spin_unlock_irq(&vcap->irqlock);

	return ret;
}
469
/* Dump the main status registers for debugging (dev_dbg only) */
static void dcmipp_dump_status(struct dcmipp_bytecap_device *vcap)
{
	struct device *dev = vcap->dev;

	dev_dbg(dev, "[DCMIPP_PRSR] =%#10.8x\n", reg_read(vcap, DCMIPP_PRSR));
	dev_dbg(dev, "[DCMIPP_P0SR] =%#10.8x\n", reg_read(vcap, DCMIPP_P0SR));
	dev_dbg(dev, "[DCMIPP_P0DCCNTR]=%#10.8x\n",
		reg_read(vcap, DCMIPP_P0DCCNTR));
	dev_dbg(dev, "[DCMIPP_CMSR1] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR1));
	dev_dbg(dev, "[DCMIPP_CMSR2] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR2));
}
481
482 /*
483 * Stop the stream engine. Any remaining buffers in the stream queue are
484 * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
485 */
dcmipp_bytecap_stop_streaming(struct vb2_queue * vq)486 static void dcmipp_bytecap_stop_streaming(struct vb2_queue *vq)
487 {
488 struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
489 int ret;
490 u32 status;
491
492 ret = v4l2_subdev_disable_streams(vcap->s_subdev,
493 vcap->s_subdev_pad_nb, BIT_ULL(0));
494 if (ret)
495 dev_warn(vcap->dev, "Failed to disable stream\n");
496
497 /* Stop the media pipeline */
498 media_pipeline_stop(vcap->vdev.entity.pads);
499
500 /* Disable interruptions */
501 reg_clear(vcap, DCMIPP_CMIER, DCMIPP_CMIER_P0ALL);
502
503 /* Stop capture */
504 reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
505
506 /* Wait until CPTACT become 0 */
507 ret = readl_relaxed_poll_timeout(vcap->regs + DCMIPP_P0SR, status,
508 !(status & DCMIPP_P0SR_CPTACT),
509 20 * USEC_PER_MSEC,
510 1000 * USEC_PER_MSEC);
511 if (ret)
512 dev_warn(vcap->dev, "Timeout when stopping\n");
513
514 /* Disable pipe */
515 reg_clear(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);
516
517 /* Clear any pending interrupts */
518 reg_write(vcap, DCMIPP_CMFCR, DCMIPP_CMIER_P0ALL);
519
520 spin_lock_irq(&vcap->irqlock);
521
522 /* Return all queued buffers to vb2 in ERROR state */
523 dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_ERROR);
524 INIT_LIST_HEAD(&vcap->buffers);
525
526 vcap->active = NULL;
527 vcap->state = DCMIPP_STOPPED;
528
529 spin_unlock_irq(&vcap->irqlock);
530
531 dcmipp_dump_status(vcap);
532
533 pm_runtime_put(vcap->dev);
534
535 if (vcap->count.errors)
536 dev_warn(vcap->dev, "Some errors found while streaming: errors=%d (overrun=%d, limit=%d, nactive=%d), underrun=%d, buffers=%d\n",
537 vcap->count.errors, vcap->count.overrun,
538 vcap->count.limit, vcap->count.nactive,
539 vcap->count.underrun, vcap->count.buffers);
540 }
541
/*
 * vb2 buf_prepare: check the plane can hold a full image and cache the
 * DMA address/size on the first call for this buffer.
 */
static int dcmipp_bytecap_buf_prepare(struct vb2_buffer *vb)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
	unsigned long size;

	size = vcap->format.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(vcap->dev, "%s data will not fit into plane (%lu < %lu)\n",
			__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	if (!buf->prepared) {
		/* Get memory addresses */
		buf->addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
		buf->prepared = true;

		/*
		 * NOTE(review): this overrides the payload set to sizeimage
		 * above with the full plane size — looks redundant; verify
		 * whether one of the two calls can be dropped
		 */
		vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);

		dev_dbg(vcap->dev, "Setup [%d] phy=%pad size=%zu\n",
			vb->index, &buf->addr, buf->size);
	}

	return 0;
}
573
/*
 * vb2 buf_queue: add the buffer to the pending list and, if capture was
 * paused for lack of buffers, restart it with this buffer.
 */
static void dcmipp_bytecap_buf_queue(struct vb2_buffer *vb2_buf)
{
	struct dcmipp_bytecap_device *vcap =
		vb2_get_drv_priv(vb2_buf->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2_buf);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);

	dev_dbg(vcap->dev, "Queue [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
		buf, &buf->addr);

	/* irqlock serializes against the interrupt thread */
	spin_lock_irq(&vcap->irqlock);
	list_add_tail(&buf->list, &vcap->buffers);

	if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
		vcap->next = buf;
		dev_dbg(vcap->dev, "Restart with next [%d] %p phy=%pad\n",
			buf->vb.vb2_buf.index, buf, &buf->addr);

		dcmipp_start_capture(vcap, buf);

		vcap->state = DCMIPP_RUNNING;
	}

	spin_unlock_irq(&vcap->irqlock);
}
599
dcmipp_bytecap_queue_setup(struct vb2_queue * vq,unsigned int * nbuffers,unsigned int * nplanes,unsigned int sizes[],struct device * alloc_devs[])600 static int dcmipp_bytecap_queue_setup(struct vb2_queue *vq,
601 unsigned int *nbuffers,
602 unsigned int *nplanes,
603 unsigned int sizes[],
604 struct device *alloc_devs[])
605 {
606 struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
607 unsigned int size;
608
609 size = vcap->format.sizeimage;
610
611 /* Make sure the image size is large enough */
612 if (*nplanes)
613 return sizes[0] < vcap->format.sizeimage ? -EINVAL : 0;
614
615 *nplanes = 1;
616 sizes[0] = vcap->format.sizeimage;
617
618 dev_dbg(vcap->dev, "Setup queue, count=%d, size=%d\n",
619 *nbuffers, size);
620
621 return 0;
622 }
623
dcmipp_bytecap_buf_init(struct vb2_buffer * vb)624 static int dcmipp_bytecap_buf_init(struct vb2_buffer *vb)
625 {
626 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
627 struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
628
629 INIT_LIST_HEAD(&buf->list);
630
631 return 0;
632 }
633
/* vb2 queue operations for the byte capture device */
static const struct vb2_ops dcmipp_bytecap_qops = {
	.start_streaming = dcmipp_bytecap_start_streaming,
	.stop_streaming = dcmipp_bytecap_stop_streaming,
	.buf_init = dcmipp_bytecap_buf_init,
	.buf_prepare = dcmipp_bytecap_buf_prepare,
	.buf_queue = dcmipp_bytecap_buf_queue,
	.queue_setup = dcmipp_bytecap_queue_setup,
};
642
dcmipp_bytecap_release(struct video_device * vdev)643 static void dcmipp_bytecap_release(struct video_device *vdev)
644 {
645 struct dcmipp_bytecap_device *vcap =
646 container_of(vdev, struct dcmipp_bytecap_device, vdev);
647
648 dcmipp_pads_cleanup(vcap->ved.pads);
649 mutex_destroy(&vcap->lock);
650
651 kfree(vcap);
652 }
653
/*
 * Entity teardown: unregister the video device; the remaining resources
 * are freed by dcmipp_bytecap_release() once the last user is gone.
 */
void dcmipp_bytecap_ent_release(struct dcmipp_ent_device *ved)
{
	struct dcmipp_bytecap_device *vcap =
		container_of(ved, struct dcmipp_bytecap_device, ved);

	media_entity_cleanup(ved->ent);
	vb2_video_unregister_device(&vcap->vdev);
}
662
/*
 * Remove the buffer from the pending list and return it to vb2 with
 * sequence number, timestamp and payload filled in. State is ERROR when
 * err is non-zero, DONE otherwise. Caller must hold irqlock.
 */
static void dcmipp_buffer_done(struct dcmipp_bytecap_device *vcap,
			       struct dcmipp_buf *buf,
			       size_t bytesused,
			       int err)
{
	struct vb2_v4l2_buffer *vbuf = &buf->vb;

	list_del_init(&buf->list);

	vbuf->sequence = vcap->sequence++;
	vbuf->field = V4L2_FIELD_NONE;
	vbuf->vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
	vb2_buffer_done(&vbuf->vb2_buf,
			err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	dev_dbg(vcap->dev, "Done [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
		buf, &buf->addr);
	vcap->count.buffers++;
}
684
685 /* irqlock must be held */
/* irqlock must be held */
static void
dcmipp_bytecap_set_next_frame_or_stop(struct dcmipp_bytecap_device *vcap)
{
	if (!vcap->next && list_is_singular(&vcap->buffers)) {
		/*
		 * If there is no available buffer (none or a single one in the
		 * list while two are expected), stop the capture (effective
		 * for next frame). On-going frame capture will continue until
		 * FRAME END but no further capture will be done.
		 */
		reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);

		dev_dbg(vcap->dev, "Capture restart is deferred to next buffer queueing\n");
		vcap->next = NULL;
		vcap->state = DCMIPP_WAIT_FOR_BUFFER;
		return;
	}

	/* If we don't have buffer yet, pick the one after active */
	if (!vcap->next)
		vcap->next = list_next_entry(vcap->active, list);

	/*
	 * Set buffer address
	 * This register is shadowed and will be taken into
	 * account on next VSYNC (start of next frame)
	 */
	reg_write(vcap, DCMIPP_P0PPM0AR1, vcap->next->addr);
	dev_dbg(vcap->dev, "Write [%d] %p phy=%pad\n",
		vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);
}
717
718 /* irqlock must be held */
dcmipp_bytecap_process_frame(struct dcmipp_bytecap_device * vcap,size_t bytesused)719 static void dcmipp_bytecap_process_frame(struct dcmipp_bytecap_device *vcap,
720 size_t bytesused)
721 {
722 int err = 0;
723 struct dcmipp_buf *buf = vcap->active;
724
725 if (!buf) {
726 vcap->count.nactive++;
727 vcap->count.errors++;
728 return;
729 }
730
731 if (bytesused > buf->size) {
732 dev_dbg(vcap->dev, "frame larger than expected (%zu > %zu)\n",
733 bytesused, buf->size);
734 /* Clip to buffer size and return buffer to V4L2 in error */
735 bytesused = buf->size;
736 vcap->count.limit++;
737 vcap->count.errors++;
738 err = -EOVERFLOW;
739 }
740
741 dcmipp_buffer_done(vcap, buf, bytesused, err);
742 vcap->active = NULL;
743 }
744
dcmipp_bytecap_irq_thread(int irq,void * arg)745 static irqreturn_t dcmipp_bytecap_irq_thread(int irq, void *arg)
746 {
747 struct dcmipp_bytecap_device *vcap =
748 container_of(arg, struct dcmipp_bytecap_device, ved);
749 size_t bytesused = 0;
750
751 spin_lock_irq(&vcap->irqlock);
752
753 /*
754 * If we have an overrun, a frame-end will probably not be generated,
755 * in that case the active buffer will be recycled as next buffer by
756 * the VSYNC handler
757 */
758 if (vcap->cmsr2 & DCMIPP_CMSR2_P0OVRF) {
759 vcap->count.errors++;
760 vcap->count.overrun++;
761 }
762
763 if (vcap->cmsr2 & DCMIPP_CMSR2_P0FRAMEF) {
764 vcap->count.frame++;
765
766 /* Read captured buffer size */
767 bytesused = reg_read(vcap, DCMIPP_P0DCCNTR);
768 dcmipp_bytecap_process_frame(vcap, bytesused);
769 }
770
771 if (vcap->cmsr2 & DCMIPP_CMSR2_P0VSYNCF) {
772 vcap->count.vsync++;
773 if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
774 vcap->count.underrun++;
775 goto out;
776 }
777
778 /*
779 * On VSYNC, the previously set next buffer is going to become
780 * active thanks to the shadowing mechanism of the DCMIPP. In
781 * most of the cases, since a FRAMEEND has already come,
782 * pointer next is NULL since active is reset during the
783 * FRAMEEND handling. However, in case of framerate adjustment,
784 * there are more VSYNC than FRAMEEND. Thus we recycle the
785 * active (but not used) buffer and put it back into next.
786 */
787 swap(vcap->active, vcap->next);
788 dcmipp_bytecap_set_next_frame_or_stop(vcap);
789 }
790
791 out:
792 spin_unlock_irq(&vcap->irqlock);
793 return IRQ_HANDLED;
794 }
795
dcmipp_bytecap_irq_callback(int irq,void * arg)796 static irqreturn_t dcmipp_bytecap_irq_callback(int irq, void *arg)
797 {
798 struct dcmipp_bytecap_device *vcap =
799 container_of(arg, struct dcmipp_bytecap_device, ved);
800
801 /* Store interrupt status register */
802 vcap->cmsr2 = reg_read(vcap, DCMIPP_CMSR2) & DCMIPP_CMIER_P0ALL;
803 vcap->count.it++;
804
805 /* Clear interrupt */
806 reg_write(vcap, DCMIPP_CMFCR, vcap->cmsr2);
807
808 return IRQ_WAKE_THREAD;
809 }
810
/*
 * Media link validation: the source subdev active format must match the
 * configured capture format in size, and its mbus code must map to the
 * capture pixelformat via the pix map table.
 */
static int dcmipp_bytecap_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vd = media_entity_to_video_device(entity);
	struct dcmipp_bytecap_device *vcap = container_of(vd,
					struct dcmipp_bytecap_device, vdev);
	struct v4l2_subdev *source_sd =
		media_entity_to_v4l2_subdev(link->source->entity);
	struct v4l2_subdev_format source_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = link->source->index,
	};
	int ret, i;

	/* Subdev without get_fmt support: nothing we can check here */
	ret = v4l2_subdev_call(source_sd, pad, get_fmt, NULL, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != vcap->format.width ||
	    source_fmt.format.height != vcap->format.height) {
		/*
		 * NOTE(review): message prints the capture size first and
		 * the source size as "expected" — verify the intended
		 * argument order
		 */
		dev_err(vcap->dev, "Wrong width or height %ux%u (%ux%u expected)\n",
			vcap->format.width, vcap->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	/* Look for a table entry matching both pixelformat and mbus code */
	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
		if (dcmipp_bytecap_pix_map_list[i].pixelformat ==
			vcap->format.pixelformat &&
		    dcmipp_bytecap_pix_map_list[i].code ==
			source_fmt.format.code)
			break;
	}

	if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list)) {
		dev_err(vcap->dev, "mbus code 0x%x do not match capture device format (0x%x)\n",
			vcap->format.pixelformat, source_fmt.format.code);
		return -EINVAL;
	}

	return 0;
}
853
/* Media entity operations: only link validation is needed */
static const struct media_entity_operations dcmipp_bytecap_entity_ops = {
	.link_validate = dcmipp_bytecap_link_validate,
};
857
dcmipp_bytecap_ent_init(struct device * dev,const char * entity_name,struct v4l2_device * v4l2_dev,void __iomem * regs)858 struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
859 const char *entity_name,
860 struct v4l2_device *v4l2_dev,
861 void __iomem *regs)
862 {
863 struct dcmipp_bytecap_device *vcap;
864 struct video_device *vdev;
865 struct vb2_queue *q;
866 const unsigned long pad_flag = MEDIA_PAD_FL_SINK;
867 int ret = 0;
868
869 /* Allocate the dcmipp_bytecap_device struct */
870 vcap = kzalloc_obj(*vcap);
871 if (!vcap)
872 return ERR_PTR(-ENOMEM);
873
874 /* Allocate the pads */
875 vcap->ved.pads = dcmipp_pads_init(1, &pad_flag);
876 if (IS_ERR(vcap->ved.pads)) {
877 ret = PTR_ERR(vcap->ved.pads);
878 goto err_free_vcap;
879 }
880
881 /* Initialize the media entity */
882 vcap->vdev.entity.name = entity_name;
883 vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
884 vcap->vdev.entity.ops = &dcmipp_bytecap_entity_ops;
885 ret = media_entity_pads_init(&vcap->vdev.entity, 1, vcap->ved.pads);
886 if (ret)
887 goto err_clean_pads;
888
889 /* Initialize the lock */
890 mutex_init(&vcap->lock);
891
892 /* Initialize the vb2 queue */
893 q = &vcap->queue;
894 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
895 q->io_modes = VB2_MMAP | VB2_DMABUF;
896 q->lock = &vcap->lock;
897 q->drv_priv = vcap;
898 q->buf_struct_size = sizeof(struct dcmipp_buf);
899 q->ops = &dcmipp_bytecap_qops;
900 q->mem_ops = &vb2_dma_contig_memops;
901 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
902 q->min_queued_buffers = 1;
903 q->dev = dev;
904
905 /* DCMIPP requires 16 bytes aligned buffers */
906 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
907 if (ret) {
908 dev_err(dev, "Failed to set DMA mask\n");
909 goto err_mutex_destroy;
910 }
911
912 ret = vb2_queue_init(q);
913 if (ret) {
914 dev_err(dev, "%s: vb2 queue init failed (err=%d)\n",
915 entity_name, ret);
916 goto err_clean_m_ent;
917 }
918
919 /* Initialize buffer list and its lock */
920 INIT_LIST_HEAD(&vcap->buffers);
921 spin_lock_init(&vcap->irqlock);
922
923 /* Set default frame format */
924 vcap->format = fmt_default;
925
926 /* Fill the dcmipp_ent_device struct */
927 vcap->ved.ent = &vcap->vdev.entity;
928 vcap->ved.handler = dcmipp_bytecap_irq_callback;
929 vcap->ved.thread_fn = dcmipp_bytecap_irq_thread;
930 vcap->dev = dev;
931 vcap->regs = regs;
932
933 /* Initialize the video_device struct */
934 vdev = &vcap->vdev;
935 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
936 V4L2_CAP_IO_MC;
937 vdev->release = dcmipp_bytecap_release;
938 vdev->fops = &dcmipp_bytecap_fops;
939 vdev->ioctl_ops = &dcmipp_bytecap_ioctl_ops;
940 vdev->lock = &vcap->lock;
941 vdev->queue = q;
942 vdev->v4l2_dev = v4l2_dev;
943 strscpy(vdev->name, entity_name, sizeof(vdev->name));
944 video_set_drvdata(vdev, &vcap->ved);
945
946 /* Register the video_device with the v4l2 and the media framework */
947 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
948 if (ret) {
949 dev_err(dev, "%s: video register failed (err=%d)\n",
950 vcap->vdev.name, ret);
951 goto err_clean_m_ent;
952 }
953
954 return &vcap->ved;
955
956 err_clean_m_ent:
957 media_entity_cleanup(&vcap->vdev.entity);
958 err_mutex_destroy:
959 mutex_destroy(&vcap->lock);
960 err_clean_pads:
961 dcmipp_pads_cleanup(vcap->ved.pads);
962 err_free_vcap:
963 kfree(vcap);
964
965 return ERR_PTR(ret);
966 }
967