1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Driver for Renesas RZ/G2L CRU
4 *
5 * Copyright (C) 2022 Renesas Electronics Corp.
6 *
7 * Based on Renesas R-Car VIN
8 * Copyright (C) 2016 Renesas Electronics Corp.
9 * Copyright (C) 2011-2013 Renesas Solutions Corp.
10 * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
11 * Copyright (C) 2008 Magnus Damm
12 */
13
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/pm_runtime.h>
17
18 #include <media/mipi-csi2.h>
19 #include <media/v4l2-ioctl.h>
20 #include <media/videobuf2-dma-contig.h>
21
22 #include "rzg2l-cru.h"
23 #include "rzg2l-cru-regs.h"
24
/* Sleep interval and retry limit when polling ICnMS_IA during stream stop. */
#define RZG2L_TIMEOUT_MS		100
#define RZG2L_RETRIES			10

/* Default capture format properties. */
#define RZG2L_CRU_DEFAULT_FORMAT	V4L2_PIX_FMT_UYVY
#define RZG2L_CRU_DEFAULT_WIDTH		RZG2L_CRU_MIN_INPUT_WIDTH
#define RZG2L_CRU_DEFAULT_HEIGHT	RZG2L_CRU_MIN_INPUT_HEIGHT
#define RZG2L_CRU_DEFAULT_FIELD		V4L2_FIELD_NONE
#define RZG2L_CRU_DEFAULT_COLORSPACE	V4L2_COLORSPACE_SRGB

/* Line stride limits in bytes; AMnIS is programmed in 128-byte units. */
#define RZG2L_CRU_STRIDE_MAX		32640
#define RZG2L_CRU_STRIDE_ALIGN		128
36
/* A capture buffer: the vb2 buffer plus its link into cru->buf_list. */
struct rzg2l_cru_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

/* Map a vb2_v4l2_buffer pointer back to its rzg2l_cru_buffer list head. */
#define to_buf_list(vb2_buffer) \
	(&container_of(vb2_buffer, struct rzg2l_cru_buffer, vb)->list)
44
45 /* -----------------------------------------------------------------------------
46 * DMA operations
47 */
/* Write @value to the CRU register identified by logical @offset. */
static void __rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
{
	const u16 *reg_offsets = cru->info->regs;

	/*
	 * CRUnCTRL is the first register on all CRU supported SoCs, so
	 * validate that the remaining registers have a valid offset set
	 * in cru->info->regs.
	 */
	if (WARN_ON(offset >= RZG2L_CRU_MAX_REG))
		return;
	if (WARN_ON(offset != CRUnCTRL && !reg_offsets[offset]))
		return;

	iowrite32(value, cru->base + reg_offsets[offset]);
}
62
/* Read the CRU register identified by logical @offset; 0 if invalid. */
static u32 __rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
{
	const u16 *reg_offsets = cru->info->regs;

	/*
	 * CRUnCTRL is the first register on all CRU supported SoCs, so
	 * validate that the remaining registers have a valid offset set
	 * in cru->info->regs.
	 */
	if (WARN_ON(offset >= RZG2L_CRU_MAX_REG))
		return 0;
	if (WARN_ON(offset != CRUnCTRL && !reg_offsets[offset]))
		return 0;

	return ioread32(cru->base + reg_offsets[offset]);
}
77
/*
 * Variant of __rzg2l_cru_write() for compile-time constant offsets: the
 * range check is performed at build time through BUILD_BUG_ON(), which
 * only works because the function is __always_inline.
 */
static __always_inline void
__rzg2l_cru_write_constant(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
{
	const u16 *regs = cru->info->regs;

	BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);

	iowrite32(value, cru->base + regs[offset]);
}
87
/*
 * Variant of __rzg2l_cru_read() for compile-time constant offsets: the
 * range check is performed at build time through BUILD_BUG_ON(), which
 * only works because the function is __always_inline.
 */
static __always_inline u32
__rzg2l_cru_read_constant(struct rzg2l_cru_dev *cru, u32 offset)
{
	const u16 *regs = cru->info->regs;

	BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);

	return ioread32(cru->base + regs[offset]);
}
97
/*
 * Dispatch register accesses to the *_constant variants when the offset
 * is a compile-time constant, so the range check happens at build time
 * instead of at runtime.
 */
#define rzg2l_cru_write(cru, offset, value) \
	(__builtin_constant_p(offset) ? \
	 __rzg2l_cru_write_constant(cru, offset, value) : \
	 __rzg2l_cru_write(cru, offset, value))

#define rzg2l_cru_read(cru, offset) \
	(__builtin_constant_p(offset) ? \
	 __rzg2l_cru_read_constant(cru, offset) : \
	 __rzg2l_cru_read(cru, offset))
107
/*
 * Return all buffers - both those assigned to HW slots and those still on
 * the pending list - to vb2 with the given state. Acquires qlock itself,
 * so the caller must NOT hold it.
 */
static void return_unused_buffers(struct rzg2l_cru_dev *cru,
				  enum vb2_buffer_state state)
{
	struct rzg2l_cru_buffer *buf, *node;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cru->qlock, flags);
	/* Release buffers currently assigned to HW slots. */
	for (i = 0; i < cru->num_buf; i++) {
		if (cru->queue_buf[i]) {
			vb2_buffer_done(&cru->queue_buf[i]->vb2_buf,
					state);
			cru->queue_buf[i] = NULL;
		}
	}

	/* Release buffers still waiting on the pending list. */
	list_for_each_entry_safe(buf, node, &cru->buf_list, list) {
		vb2_buffer_done(&buf->vb.vb2_buf, state);
		list_del(&buf->list);
	}
	spin_unlock_irqrestore(&cru->qlock, flags);
}
131
rzg2l_cru_queue_setup(struct vb2_queue * vq,unsigned int * nbuffers,unsigned int * nplanes,unsigned int sizes[],struct device * alloc_devs[])132 static int rzg2l_cru_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
133 unsigned int *nplanes, unsigned int sizes[],
134 struct device *alloc_devs[])
135 {
136 struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
137
138 /* Make sure the image size is large enough. */
139 if (*nplanes)
140 return sizes[0] < cru->format.sizeimage ? -EINVAL : 0;
141
142 *nplanes = 1;
143 sizes[0] = cru->format.sizeimage;
144
145 return 0;
146 };
147
rzg2l_cru_buffer_prepare(struct vb2_buffer * vb)148 static int rzg2l_cru_buffer_prepare(struct vb2_buffer *vb)
149 {
150 struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
151 unsigned long size = cru->format.sizeimage;
152
153 if (vb2_plane_size(vb, 0) < size) {
154 dev_err(cru->dev, "buffer too small (%lu < %lu)\n",
155 vb2_plane_size(vb, 0), size);
156 return -EINVAL;
157 }
158
159 vb2_set_plane_payload(vb, 0, size);
160
161 return 0;
162 }
163
rzg2l_cru_buffer_queue(struct vb2_buffer * vb)164 static void rzg2l_cru_buffer_queue(struct vb2_buffer *vb)
165 {
166 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
167 struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
168 unsigned long flags;
169
170 spin_lock_irqsave(&cru->qlock, flags);
171
172 list_add_tail(to_buf_list(vbuf), &cru->buf_list);
173
174 spin_unlock_irqrestore(&cru->qlock, flags);
175 }
176
/* Program the DMA address of memory bank @slot and record it. */
static void rzg2l_cru_set_slot_addr(struct rzg2l_cru_dev *cru,
				    int slot, dma_addr_t addr)
{
	/*
	 * The address needs to be 512 bytes aligned. The driver should
	 * never have accepted settings that violate this in the first
	 * place.
	 */
	if (WARN_ON(addr & RZG2L_CRU_HW_BUFFER_MASK))
		return;

	/* Only 32-bit addresses are used; the high word is always zero. */
	rzg2l_cru_write(cru, AMnMBxADDRL(slot), addr);
	rzg2l_cru_write(cru, AMnMBxADDRH(slot), 0);

	cru->buf_addr[slot] = addr;
}
193
194 /*
195 * Moves a buffer from the queue to the HW slot. If no buffer is
196 * available use the scratch buffer. The scratch buffer is never
197 * returned to userspace, its only function is to enable the capture
198 * loop to keep running.
199 */
rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev * cru,int slot)200 static void rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev *cru, int slot)
201 {
202 struct vb2_v4l2_buffer *vbuf;
203 struct rzg2l_cru_buffer *buf;
204 dma_addr_t phys_addr;
205
206 /* A already populated slot shall never be overwritten. */
207 if (WARN_ON(cru->queue_buf[slot]))
208 return;
209
210 dev_dbg(cru->dev, "Filling HW slot: %d\n", slot);
211
212 if (list_empty(&cru->buf_list)) {
213 cru->queue_buf[slot] = NULL;
214 phys_addr = cru->scratch_phys;
215 } else {
216 /* Keep track of buffer we give to HW */
217 buf = list_entry(cru->buf_list.next,
218 struct rzg2l_cru_buffer, list);
219 vbuf = &buf->vb;
220 list_del_init(to_buf_list(vbuf));
221 cru->queue_buf[slot] = vbuf;
222
223 /* Setup DMA */
224 phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
225 }
226
227 rzg2l_cru_set_slot_addr(cru, slot, phys_addr);
228 }
229
/*
 * Initialize the AXI master: enable the memory banks, fill every HW slot
 * with a buffer, program the line stride (when supported) and the AXI
 * burst length.
 */
static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
{
	const struct rzg2l_cru_info *info = cru->info;
	unsigned int slot;
	u32 amnaxiattr;

	/*
	 * Set image data memory banks.
	 * Currently, we will use maximum address.
	 */
	rzg2l_cru_write(cru, AMnMBVALID, AMnMBVALID_MBVALID(cru->num_buf - 1));

	for (slot = 0; slot < cru->num_buf; slot++)
		rzg2l_cru_fill_hw_slot(cru, slot);

	if (info->has_stride) {
		u32 stride = cru->format.bytesperline;
		u32 amnis;

		/* AMnIS takes the stride in 128-byte units. */
		stride /= RZG2L_CRU_STRIDE_ALIGN;
		amnis = rzg2l_cru_read(cru, AMnIS) & ~AMnIS_IS_MASK;
		rzg2l_cru_write(cru, AMnIS, amnis | AMnIS_IS(stride));
	}

	/* Set AXI burst max length to recommended setting */
	amnaxiattr = rzg2l_cru_read(cru, AMnAXIATTR) & ~AMnAXIATTR_AXILEN_MASK;
	amnaxiattr |= AMnAXIATTR_AXILEN;
	rzg2l_cru_write(cru, AMnAXIATTR, amnaxiattr);
}
259
/*
 * rzg3e_cru_csi2_setup - configure the CSI-2 input (RZ/G3E variant)
 * @cru: CRU device
 * @ip_fmt: input format, provides the CSI-2 datatype
 * @csi_vc: CSI-2 virtual channel to capture from
 *
 * In addition to the datatype/VC selection done on RZ/G2L, this variant
 * also programs the service channel registers (ICnSVCNUM/ICnSVC).
 */
void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
			  const struct rzg2l_cru_ip_format *ip_fmt,
			  u8 csi_vc)
{
	const struct rzg2l_cru_info *info = cru->info;
	u32 icnmc = ICnMC_INF(ip_fmt->datatype);

	/* Preserve all non-INF bits of the image conversion register. */
	icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;

	/* Set virtual channel CSI2 */
	icnmc |= ICnMC_VCSEL(csi_vc);

	rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
	rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
		ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
	rzg2l_cru_write(cru, info->image_conv, icnmc);
}
277
/*
 * rzg2l_cru_csi2_setup - configure the CSI-2 input (RZ/G2L variant)
 * @cru: CRU device
 * @ip_fmt: input format, provides the CSI-2 datatype
 * @csi_vc: CSI-2 virtual channel to capture from
 */
void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
			  const struct rzg2l_cru_ip_format *ip_fmt,
			  u8 csi_vc)
{
	const struct rzg2l_cru_info *info = cru->info;
	u32 icnmc = ICnMC_INF(ip_fmt->datatype);

	/* Preserve all non-INF bits of the image conversion register. */
	icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;

	/* Set virtual channel CSI2 */
	icnmc |= ICnMC_VCSEL(csi_vc);

	rzg2l_cru_write(cru, info->image_conv, icnmc);
}
292
/*
 * Configure the image conversion block: select the CSI-2 input format,
 * choose bypass or color-space conversion depending on whether input and
 * output share a colorspace, and program the output data format.
 *
 * Returns 0 on success or -EINVAL on an unsupported format.
 */
static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
					   struct v4l2_mbus_framefmt *ip_sd_fmt,
					   u8 csi_vc)
{
	const struct rzg2l_cru_info *info = cru->info;
	const struct rzg2l_cru_ip_format *cru_video_fmt;
	const struct rzg2l_cru_ip_format *cru_ip_fmt;

	/*
	 * Guard against an unsupported media bus code: csi_setup() and the
	 * bypass decision below dereference cru_ip_fmt.
	 */
	cru_ip_fmt = rzg2l_cru_ip_code_to_fmt(ip_sd_fmt->code);
	if (!cru_ip_fmt) {
		dev_err(cru->dev, "Invalid media bus code (0x%x)\n",
			ip_sd_fmt->code);
		return -EINVAL;
	}
	info->csi_setup(cru, cru_ip_fmt, csi_vc);

	/* Output format */
	cru_video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
	if (!cru_video_fmt) {
		dev_err(cru->dev, "Invalid pixelformat (0x%x)\n",
			cru->format.pixelformat);
		return -EINVAL;
	}

	/* If input and output use same colorspace, do bypass mode */
	if (cru_ip_fmt->yuv == cru_video_fmt->yuv)
		rzg2l_cru_write(cru, info->image_conv,
				rzg2l_cru_read(cru, info->image_conv) | ICnMC_CSCTHR);
	else
		rzg2l_cru_write(cru, info->image_conv,
				rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_CSCTHR);

	/* Set output data format */
	rzg2l_cru_write(cru, ICnDMR, cru_video_fmt->icndmr);

	return 0;
}
325
rzg3e_fifo_empty(struct rzg2l_cru_dev * cru)326 bool rzg3e_fifo_empty(struct rzg2l_cru_dev *cru)
327 {
328 u32 amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
329
330 if ((((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B1) >> 24) ==
331 ((amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B1) >> 8)) &&
332 (((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B0) >> 16) ==
333 (amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B0)))
334 return true;
335
336 return false;
337 }
338
rzg2l_fifo_empty(struct rzg2l_cru_dev * cru)339 bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru)
340 {
341 u32 amnfifopntr, amnfifopntr_w, amnfifopntr_r_y;
342
343 amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
344
345 amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
346 amnfifopntr_r_y =
347 (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
348
349 return amnfifopntr_w == amnfifopntr_r_y;
350 }
351
/*
 * Stop the image conversion block: disable interrupts, wait for the
 * conversion to become idle, drain the FIFO, stop the AXI master and
 * finally put the CRU back into reset.
 */
void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
{
	unsigned int retries = 0;
	unsigned long flags;
	u32 icnms;

	spin_lock_irqsave(&cru->qlock, flags);

	/* Disable and clear the interrupt */
	cru->info->disable_interrupts(cru);

	/* Stop the operation of image conversion */
	rzg2l_cru_write(cru, ICnEN, 0);

	/* Wait for streaming to stop; qlock is dropped while sleeping. */
	while ((rzg2l_cru_read(cru, ICnMS) & ICnMS_IA) && retries++ < RZG2L_RETRIES) {
		spin_unlock_irqrestore(&cru->qlock, flags);
		msleep(RZG2L_TIMEOUT_MS);
		spin_lock_irqsave(&cru->qlock, flags);
	}

	icnms = rzg2l_cru_read(cru, ICnMS) & ICnMS_IA;
	if (icnms)
		dev_err(cru->dev, "Failed stop HW, something is seriously broken\n");

	cru->state = RZG2L_CRU_DMA_STOPPED;

	/* Wait until the FIFO becomes empty */
	for (retries = 5; retries > 0; retries--) {
		if (cru->info->fifo_empty(cru))
			break;

		usleep_range(10, 20);
	}

	/* Notify that FIFO is not empty here */
	if (!retries)
		dev_err(cru->dev, "Failed to empty FIFO\n");

	/* Stop AXI bus */
	rzg2l_cru_write(cru, AMnAXISTP, AMnAXISTP_AXI_STOP);

	/* Wait until the AXI bus stop */
	for (retries = 5; retries > 0; retries--) {
		if (rzg2l_cru_read(cru, AMnAXISTPACK) &
			AMnAXISTPACK_AXI_STOP_ACK)
			break;

		usleep_range(10, 20);
	}

	/* Notify that AXI bus can not stop here */
	if (!retries)
		dev_err(cru->dev, "Failed to stop AXI bus\n");

	/* Cancel the AXI bus stop request */
	rzg2l_cru_write(cru, AMnAXISTP, 0);

	/* Reset the CRU (AXI-master) */
	reset_control_assert(cru->aresetn);

	/* Resets the image processing module */
	rzg2l_cru_write(cru, CRUnRST, 0);

	spin_unlock_irqrestore(&cru->qlock, flags);
}
418
rzg2l_cru_get_virtual_channel(struct rzg2l_cru_dev * cru)419 static int rzg2l_cru_get_virtual_channel(struct rzg2l_cru_dev *cru)
420 {
421 struct v4l2_mbus_frame_desc fd = { };
422 struct media_pad *remote_pad;
423 int ret;
424
425 remote_pad = media_pad_remote_pad_unique(&cru->ip.pads[RZG2L_CRU_IP_SINK]);
426 ret = v4l2_subdev_call(cru->ip.remote, pad, get_frame_desc, remote_pad->index, &fd);
427 if (ret < 0 && ret != -ENOIOCTLCMD) {
428 dev_err(cru->dev, "get_frame_desc failed on IP remote subdev\n");
429 return ret;
430 }
431 /* If remote subdev does not implement .get_frame_desc default to VC0. */
432 if (ret == -ENOIOCTLCMD)
433 return 0;
434
435 if (fd.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
436 dev_err(cru->dev, "get_frame_desc returned invalid bus type %d\n", fd.type);
437 return -EINVAL;
438 }
439
440 if (!fd.num_entries) {
441 dev_err(cru->dev, "get_frame_desc returned zero entries\n");
442 return -EINVAL;
443 }
444
445 return fd.entry[0].bus.csi2.vc;
446 }
447
rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev * cru)448 void rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
449 {
450 rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FSxE(cru->svc_channel));
451 rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FExE(cru->svc_channel));
452 }
453
/* Mask all CRU interrupts and acknowledge any pending status. */
void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
{
	rzg2l_cru_write(cru, CRUnIE, 0);
	rzg2l_cru_write(cru, CRUnIE2, 0);
	/* Write the pending bits back to clear them. */
	rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));
	rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
}
461
/* Enable the end-of-frame interrupt (RZ/G2L variant). */
void rzg2l_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
{
	rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
}
466
/* Mask all CRU interrupts and clear pending status (RZ/G2L variant). */
void rzg2l_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
{
	rzg2l_cru_write(cru, CRUnIE, 0);
	/* Clear all status bits; 0x001f000f covers every CRUnINTS field. */
	rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);
}
472
/*
 * Bring up the capture path: resolve the CSI-2 virtual channel, take the
 * image processing block out of reset, initialize the AXI master and the
 * image conversion block, then enable reception.
 *
 * Returns 0 on success or a negative error code.
 */
int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
{
	struct v4l2_mbus_framefmt *fmt = rzg2l_cru_ip_get_src_fmt(cru);
	unsigned long flags;
	u8 csi_vc;
	int ret;

	ret = rzg2l_cru_get_virtual_channel(cru);
	if (ret < 0)
		return ret;
	csi_vc = ret;
	cru->svc_channel = csi_vc;

	spin_lock_irqsave(&cru->qlock, flags);

	/* Select a video input */
	rzg2l_cru_write(cru, CRUnCTRL, CRUnCTRL_VINSEL(0));

	/* Cancel the software reset for image processing block */
	rzg2l_cru_write(cru, CRUnRST, CRUnRST_VRESETN);

	/* Disable and clear the interrupt before using */
	cru->info->disable_interrupts(cru);

	/* Initialize the AXI master */
	rzg2l_cru_initialize_axi(cru);

	/* Initialize image convert */
	ret = rzg2l_cru_initialize_image_conv(cru, fmt, csi_vc);
	if (ret) {
		spin_unlock_irqrestore(&cru->qlock, flags);
		return ret;
	}

	/* Enable interrupt */
	cru->info->enable_interrupts(cru);

	/* Enable image processing reception */
	rzg2l_cru_write(cru, ICnEN, ICnEN_ICEN);

	spin_unlock_irqrestore(&cru->qlock, flags);

	return 0;
}
517
/*
 * Start (@on != 0) or stop (@on == 0) streaming on the subdevice feeding
 * the video node, managing the media pipeline around the subdev calls.
 *
 * On stop, both s_stream(0) and post_streamoff are always attempted and
 * the pipeline is always stopped; the first error encountered is the one
 * reported back.
 */
static int rzg2l_cru_set_stream(struct rzg2l_cru_dev *cru, int on)
{
	struct media_pipeline *pipe;
	struct v4l2_subdev *sd;
	struct media_pad *pad;
	int ret;

	pad = media_pad_remote_pad_first(&cru->pad);
	if (!pad)
		return -EPIPE;

	sd = media_entity_to_v4l2_subdev(pad->entity);

	if (!on) {
		int stream_off_ret = 0;

		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		if (ret)
			stream_off_ret = ret;

		/* post_streamoff is optional; -ENOIOCTLCMD is not an error. */
		ret = v4l2_subdev_call(sd, video, post_streamoff);
		if (ret == -ENOIOCTLCMD)
			ret = 0;
		if (ret && !stream_off_ret)
			stream_off_ret = ret;

		video_device_pipeline_stop(&cru->vdev);

		return stream_off_ret;
	}

	/* Reuse an already-running pipeline on the entity, if any. */
	pipe = media_entity_pipeline(&sd->entity) ? : &cru->vdev.pipe;
	ret = video_device_pipeline_start(&cru->vdev, pipe);
	if (ret)
		return ret;

	ret = v4l2_subdev_call(sd, video, pre_streamon, 0);
	if (ret && ret != -ENOIOCTLCMD)
		goto pipe_line_stop;

	ret = v4l2_subdev_call(sd, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD)
		goto err_s_stream;

	return 0;

err_s_stream:
	v4l2_subdev_call(sd, video, post_streamoff);

pipe_line_stop:
	video_device_pipeline_stop(&cru->vdev);

	return ret;
}
572
/* Flag capture as stopping for the IRQ handler, then stop the stream. */
static void rzg2l_cru_stop_streaming(struct rzg2l_cru_dev *cru)
{
	cru->state = RZG2L_CRU_DMA_STOPPING;

	rzg2l_cru_set_stream(cru, 0);
}
579
rzg2l_cru_irq(int irq,void * data)580 irqreturn_t rzg2l_cru_irq(int irq, void *data)
581 {
582 struct rzg2l_cru_dev *cru = data;
583 unsigned int handled = 0;
584 unsigned long flags;
585 u32 irq_status;
586 u32 amnmbs;
587 int slot;
588
589 spin_lock_irqsave(&cru->qlock, flags);
590
591 irq_status = rzg2l_cru_read(cru, CRUnINTS);
592 if (!irq_status)
593 goto done;
594
595 handled = 1;
596
597 rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));
598
599 /* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
600 if (cru->state == RZG2L_CRU_DMA_STOPPED) {
601 dev_dbg(cru->dev, "IRQ while state stopped\n");
602 goto done;
603 }
604
605 /* Increase stop retries if capture status is 'RZG2L_CRU_DMA_STOPPING' */
606 if (cru->state == RZG2L_CRU_DMA_STOPPING) {
607 if (irq_status & CRUnINTS_SFS)
608 dev_dbg(cru->dev, "IRQ while state stopping\n");
609 goto done;
610 }
611
612 /* Prepare for capture and update state */
613 amnmbs = rzg2l_cru_read(cru, AMnMBS);
614 slot = amnmbs & AMnMBS_MBSTS;
615
616 /*
617 * AMnMBS.MBSTS indicates the destination of Memory Bank (MB).
618 * Recalculate to get the current transfer complete MB.
619 */
620 if (slot == 0)
621 slot = cru->num_buf - 1;
622 else
623 slot--;
624
625 /*
626 * To hand buffers back in a known order to userspace start
627 * to capture first from slot 0.
628 */
629 if (cru->state == RZG2L_CRU_DMA_STARTING) {
630 if (slot != 0) {
631 dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
632 goto done;
633 }
634
635 dev_dbg(cru->dev, "Capture start synced!\n");
636 cru->state = RZG2L_CRU_DMA_RUNNING;
637 }
638
639 /* Capture frame */
640 if (cru->queue_buf[slot]) {
641 cru->queue_buf[slot]->field = cru->format.field;
642 cru->queue_buf[slot]->sequence = cru->sequence;
643 cru->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
644 vb2_buffer_done(&cru->queue_buf[slot]->vb2_buf,
645 VB2_BUF_STATE_DONE);
646 cru->queue_buf[slot] = NULL;
647 } else {
648 /* Scratch buffer was used, dropping frame. */
649 dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
650 }
651
652 cru->sequence++;
653
654 /* Prepare for next frame */
655 rzg2l_cru_fill_hw_slot(cru, slot);
656
657 done:
658 spin_unlock_irqrestore(&cru->qlock, flags);
659
660 return IRQ_RETVAL(handled);
661 }
662
rzg3e_cru_get_current_slot(struct rzg2l_cru_dev * cru)663 static int rzg3e_cru_get_current_slot(struct rzg2l_cru_dev *cru)
664 {
665 u64 amnmadrs;
666 int slot;
667
668 /*
669 * When AMnMADRSL is read, AMnMADRSH of the higher-order
670 * address also latches the address.
671 *
672 * AMnMADRSH must be read after AMnMADRSL has been read.
673 */
674 amnmadrs = rzg2l_cru_read(cru, AMnMADRSL);
675 amnmadrs |= (u64)rzg2l_cru_read(cru, AMnMADRSH) << 32;
676
677 /* Ensure amnmadrs is within this buffer range */
678 for (slot = 0; slot < cru->num_buf; slot++) {
679 if (amnmadrs >= cru->buf_addr[slot] &&
680 amnmadrs < cru->buf_addr[slot] + cru->format.sizeimage)
681 return slot;
682 }
683
684 dev_err(cru->dev, "Invalid MB address 0x%llx (out of range)\n", amnmadrs);
685 return -EINVAL;
686 }
687
rzg3e_cru_irq(int irq,void * data)688 irqreturn_t rzg3e_cru_irq(int irq, void *data)
689 {
690 struct rzg2l_cru_dev *cru = data;
691 u32 irq_status;
692 int slot;
693
694 scoped_guard(spinlock, &cru->qlock) {
695 irq_status = rzg2l_cru_read(cru, CRUnINTS2);
696 if (!irq_status)
697 return IRQ_NONE;
698
699 dev_dbg(cru->dev, "CRUnINTS2 0x%x\n", irq_status);
700
701 rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
702
703 /* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
704 if (cru->state == RZG2L_CRU_DMA_STOPPED) {
705 dev_dbg(cru->dev, "IRQ while state stopped\n");
706 return IRQ_HANDLED;
707 }
708
709 if (cru->state == RZG2L_CRU_DMA_STOPPING) {
710 if (irq_status & CRUnINTS2_FSxS(0) ||
711 irq_status & CRUnINTS2_FSxS(1) ||
712 irq_status & CRUnINTS2_FSxS(2) ||
713 irq_status & CRUnINTS2_FSxS(3))
714 dev_dbg(cru->dev, "IRQ while state stopping\n");
715 return IRQ_HANDLED;
716 }
717
718 slot = rzg3e_cru_get_current_slot(cru);
719 if (slot < 0)
720 return IRQ_HANDLED;
721
722 dev_dbg(cru->dev, "Current written slot: %d\n", slot);
723 cru->buf_addr[slot] = 0;
724
725 /*
726 * To hand buffers back in a known order to userspace start
727 * to capture first from slot 0.
728 */
729 if (cru->state == RZG2L_CRU_DMA_STARTING) {
730 if (slot != 0) {
731 dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
732 return IRQ_HANDLED;
733 }
734 dev_dbg(cru->dev, "Capture start synced!\n");
735 cru->state = RZG2L_CRU_DMA_RUNNING;
736 }
737
738 /* Capture frame */
739 if (cru->queue_buf[slot]) {
740 struct vb2_v4l2_buffer *buf = cru->queue_buf[slot];
741
742 buf->field = cru->format.field;
743 buf->sequence = cru->sequence;
744 buf->vb2_buf.timestamp = ktime_get_ns();
745 vb2_buffer_done(&buf->vb2_buf, VB2_BUF_STATE_DONE);
746 cru->queue_buf[slot] = NULL;
747 } else {
748 /* Scratch buffer was used, dropping frame. */
749 dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
750 }
751
752 cru->sequence++;
753
754 /* Prepare for next frame */
755 rzg2l_cru_fill_hw_slot(cru, slot);
756 }
757
758 return IRQ_HANDLED;
759 }
760
rzg2l_cru_start_streaming_vq(struct vb2_queue * vq,unsigned int count)761 static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count)
762 {
763 struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
764 int ret;
765
766 ret = pm_runtime_resume_and_get(cru->dev);
767 if (ret)
768 return ret;
769
770 ret = clk_prepare_enable(cru->vclk);
771 if (ret)
772 goto err_pm_put;
773
774 /* Release reset state */
775 ret = reset_control_deassert(cru->aresetn);
776 if (ret) {
777 dev_err(cru->dev, "failed to deassert aresetn\n");
778 goto err_vclk_disable;
779 }
780
781 ret = reset_control_deassert(cru->presetn);
782 if (ret) {
783 reset_control_assert(cru->aresetn);
784 dev_err(cru->dev, "failed to deassert presetn\n");
785 goto assert_aresetn;
786 }
787
788 /* Allocate scratch buffer */
789 cru->scratch = dma_alloc_coherent(cru->dev, cru->format.sizeimage,
790 &cru->scratch_phys, GFP_KERNEL);
791 if (!cru->scratch) {
792 return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
793 dev_err(cru->dev, "Failed to allocate scratch buffer\n");
794 ret = -ENOMEM;
795 goto assert_presetn;
796 }
797
798 cru->sequence = 0;
799
800 ret = rzg2l_cru_set_stream(cru, 1);
801 if (ret) {
802 return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
803 goto out;
804 }
805
806 cru->state = RZG2L_CRU_DMA_STARTING;
807 dev_dbg(cru->dev, "Starting to capture\n");
808 return 0;
809
810 out:
811 if (ret)
812 dma_free_coherent(cru->dev, cru->format.sizeimage, cru->scratch,
813 cru->scratch_phys);
814 assert_presetn:
815 reset_control_assert(cru->presetn);
816
817 assert_aresetn:
818 reset_control_assert(cru->aresetn);
819
820 err_vclk_disable:
821 clk_disable_unprepare(cru->vclk);
822
823 err_pm_put:
824 pm_runtime_put_sync(cru->dev);
825
826 return ret;
827 }
828
/*
 * vb2 .stop_streaming: stop the capture pipeline, then release the
 * scratch buffer, return all buffers to vb2 and power the device down.
 */
static void rzg2l_cru_stop_streaming_vq(struct vb2_queue *vq)
{
	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);

	/* Must happen before freeing the scratch buffer the HW may use. */
	rzg2l_cru_stop_streaming(cru);

	/* Free scratch buffer */
	dma_free_coherent(cru->dev, cru->format.sizeimage,
			  cru->scratch, cru->scratch_phys);

	return_unused_buffers(cru, VB2_BUF_STATE_ERROR);

	reset_control_assert(cru->presetn);
	clk_disable_unprepare(cru->vclk);
	pm_runtime_put_sync(cru->dev);
}
845
/* vb2 queue operations for the CRU capture queue. */
static const struct vb2_ops rzg2l_cru_qops = {
	.queue_setup = rzg2l_cru_queue_setup,
	.buf_prepare = rzg2l_cru_buffer_prepare,
	.buf_queue = rzg2l_cru_buffer_queue,
	.start_streaming = rzg2l_cru_start_streaming_vq,
	.stop_streaming = rzg2l_cru_stop_streaming_vq,
};
853
/* Tear down what rzg2l_cru_dma_register() set up. */
void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru)
{
	mutex_destroy(&cru->lock);

	v4l2_device_unregister(&cru->v4l2_dev);
	vb2_queue_release(&cru->queue);
}
861
rzg2l_cru_dma_register(struct rzg2l_cru_dev * cru)862 int rzg2l_cru_dma_register(struct rzg2l_cru_dev *cru)
863 {
864 struct vb2_queue *q = &cru->queue;
865 unsigned int i;
866 int ret;
867
868 /* Initialize the top-level structure */
869 ret = v4l2_device_register(cru->dev, &cru->v4l2_dev);
870 if (ret)
871 return ret;
872
873 mutex_init(&cru->lock);
874 INIT_LIST_HEAD(&cru->buf_list);
875
876 spin_lock_init(&cru->qlock);
877
878 cru->state = RZG2L_CRU_DMA_STOPPED;
879
880 for (i = 0; i < RZG2L_CRU_HW_BUFFER_MAX; i++)
881 cru->queue_buf[i] = NULL;
882
883 /* buffer queue */
884 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
885 q->io_modes = VB2_MMAP | VB2_DMABUF;
886 q->lock = &cru->lock;
887 q->drv_priv = cru;
888 q->buf_struct_size = sizeof(struct rzg2l_cru_buffer);
889 q->ops = &rzg2l_cru_qops;
890 q->mem_ops = &vb2_dma_contig_memops;
891 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
892 q->min_queued_buffers = 4;
893 q->dev = cru->dev;
894
895 ret = vb2_queue_init(q);
896 if (ret < 0) {
897 dev_err(cru->dev, "failed to initialize VB2 queue\n");
898 goto error;
899 }
900
901 return 0;
902
903 error:
904 mutex_destroy(&cru->lock);
905 v4l2_device_unregister(&cru->v4l2_dev);
906 return ret;
907 }
908
909 /* -----------------------------------------------------------------------------
910 * V4L2 stuff
911 */
912
rzg2l_cru_format_align(struct rzg2l_cru_dev * cru,struct v4l2_pix_format * pix)913 static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
914 struct v4l2_pix_format *pix)
915 {
916 const struct rzg2l_cru_info *info = cru->info;
917 const struct rzg2l_cru_ip_format *fmt;
918
919 fmt = rzg2l_cru_ip_format_to_fmt(pix->pixelformat);
920 if (!fmt) {
921 pix->pixelformat = RZG2L_CRU_DEFAULT_FORMAT;
922 fmt = rzg2l_cru_ip_format_to_fmt(pix->pixelformat);
923 }
924
925 switch (pix->field) {
926 case V4L2_FIELD_TOP:
927 case V4L2_FIELD_BOTTOM:
928 case V4L2_FIELD_NONE:
929 case V4L2_FIELD_INTERLACED_TB:
930 case V4L2_FIELD_INTERLACED_BT:
931 case V4L2_FIELD_INTERLACED:
932 break;
933 default:
934 pix->field = RZG2L_CRU_DEFAULT_FIELD;
935 break;
936 }
937
938 /* Limit to CRU capabilities */
939 v4l_bound_align_image(&pix->width, 320, info->max_width, 1,
940 &pix->height, 240, info->max_height, 2, 0);
941
942 v4l2_fill_pixfmt(pix, pix->pixelformat, pix->width, pix->height);
943
944 dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n",
945 pix->width, pix->height, pix->bytesperline, pix->sizeimage);
946 }
947
/*
 * Adjust a userspace-supplied pixel format to something the hardware
 * supports, forcing the colorspace fields to fixed values.
 */
static void rzg2l_cru_try_format(struct rzg2l_cru_dev *cru,
				 struct v4l2_pix_format *pix)
{
	/*
	 * The V4L2 specification clearly documents the colorspace fields
	 * as being set by drivers for capture devices. Using the values
	 * supplied by userspace thus wouldn't comply with the API. Until
	 * the API is updated force fixed values.
	 */
	pix->colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
	pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
	pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
	pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
							  pix->ycbcr_enc);

	rzg2l_cru_format_align(cru, pix);
}
965
/* VIDIOC_QUERYCAP: report driver and card identification strings. */
static int rzg2l_cru_querycap(struct file *file, void *priv,
			      struct v4l2_capability *cap)
{
	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, "RZG2L_CRU", sizeof(cap->card));

	return 0;
}
974
/* VIDIOC_TRY_FMT: adjust the requested format without applying it. */
static int rzg2l_cru_try_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);

	rzg2l_cru_try_format(cru, &f->fmt.pix);

	return 0;
}
984
/*
 * VIDIOC_S_FMT: apply the (adjusted) format, unless buffers are already
 * allocated on the queue.
 */
static int rzg2l_cru_s_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);

	if (vb2_is_busy(&cru->queue))
		return -EBUSY;

	rzg2l_cru_try_format(cru, &f->fmt.pix);

	cru->format = f->fmt.pix;

	return 0;
}
999
/* VIDIOC_G_FMT: return the currently active capture format. */
static int rzg2l_cru_g_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rzg2l_cru_dev *cru = video_drvdata(file);

	f->fmt.pix = cru->format;

	return 0;
}
1009
rzg2l_cru_enum_fmt_vid_cap(struct file * file,void * priv,struct v4l2_fmtdesc * f)1010 static int rzg2l_cru_enum_fmt_vid_cap(struct file *file, void *priv,
1011 struct v4l2_fmtdesc *f)
1012 {
1013 const struct rzg2l_cru_ip_format *fmt;
1014
1015 fmt = rzg2l_cru_ip_index_to_fmt(f->index);
1016 if (!fmt)
1017 return -EINVAL;
1018
1019 f->pixelformat = fmt->format;
1020
1021 return 0;
1022 }
1023
rzg2l_cru_enum_framesizes(struct file * file,void * fh,struct v4l2_frmsizeenum * fsize)1024 static int rzg2l_cru_enum_framesizes(struct file *file, void *fh,
1025 struct v4l2_frmsizeenum *fsize)
1026 {
1027 struct rzg2l_cru_dev *cru = video_drvdata(file);
1028 const struct rzg2l_cru_info *info = cru->info;
1029 const struct rzg2l_cru_ip_format *fmt;
1030
1031 if (fsize->index)
1032 return -EINVAL;
1033
1034 fmt = rzg2l_cru_ip_format_to_fmt(fsize->pixel_format);
1035 if (!fmt)
1036 return -EINVAL;
1037
1038 fsize->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
1039 fsize->stepwise.min_width = RZG2L_CRU_MIN_INPUT_WIDTH;
1040 fsize->stepwise.max_width = info->max_width;
1041 fsize->stepwise.step_width = 1;
1042 fsize->stepwise.min_height = RZG2L_CRU_MIN_INPUT_HEIGHT;
1043 fsize->stepwise.max_height = info->max_height;
1044 fsize->stepwise.step_height = 1;
1045
1046 return 0;
1047 }
1048
/*
 * V4L2 ioctl handlers for the CRU capture video node.
 *
 * Format and frame-size queries are implemented locally above; all
 * buffer management and stream on/off ioctls are delegated to the
 * generic videobuf2 helpers operating on cru->queue.
 */
static const struct v4l2_ioctl_ops rzg2l_cru_ioctl_ops = {
	.vidioc_querycap = rzg2l_cru_querycap,
	.vidioc_try_fmt_vid_cap = rzg2l_cru_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = rzg2l_cru_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = rzg2l_cru_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = rzg2l_cru_enum_fmt_vid_cap,

	/* Buffer queue and streaming control: standard vb2 helpers. */
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_enum_framesizes = rzg2l_cru_enum_framesizes,
};
1067
1068 /* -----------------------------------------------------------------------------
1069 * Media controller file operations
1070 */
1071
rzg2l_cru_open(struct file * file)1072 static int rzg2l_cru_open(struct file *file)
1073 {
1074 struct rzg2l_cru_dev *cru = video_drvdata(file);
1075 int ret;
1076
1077 ret = mutex_lock_interruptible(&cru->lock);
1078 if (ret)
1079 return ret;
1080
1081 file->private_data = cru;
1082 ret = v4l2_fh_open(file);
1083 if (ret)
1084 goto err_unlock;
1085
1086 mutex_unlock(&cru->lock);
1087
1088 return 0;
1089
1090 err_unlock:
1091 mutex_unlock(&cru->lock);
1092
1093 return ret;
1094 }
1095
rzg2l_cru_release(struct file * file)1096 static int rzg2l_cru_release(struct file *file)
1097 {
1098 struct rzg2l_cru_dev *cru = video_drvdata(file);
1099 int ret;
1100
1101 mutex_lock(&cru->lock);
1102
1103 /* the release helper will cleanup any on-going streaming. */
1104 ret = _vb2_fop_release(file, NULL);
1105
1106 mutex_unlock(&cru->lock);
1107
1108 return ret;
1109 }
1110
/*
 * File operations for the CRU video node. open/release are wrapped
 * locally so cru->lock is held while the file handle is created and
 * destroyed; poll/mmap/read are the stock videobuf2 helpers.
 */
static const struct v4l2_file_operations rzg2l_cru_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = rzg2l_cru_open,
	.release = rzg2l_cru_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
	.read = vb2_fop_read,
};
1120
1121 /* -----------------------------------------------------------------------------
1122 * Media entity operations
1123 */
1124
rzg2l_cru_video_link_validate(struct media_link * link)1125 static int rzg2l_cru_video_link_validate(struct media_link *link)
1126 {
1127 struct v4l2_subdev_format fmt = {
1128 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1129 };
1130 const struct rzg2l_cru_ip_format *video_fmt;
1131 struct v4l2_subdev *subdev;
1132 struct rzg2l_cru_dev *cru;
1133 int ret;
1134
1135 subdev = media_entity_to_v4l2_subdev(link->source->entity);
1136 fmt.pad = link->source->index;
1137 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
1138 if (ret < 0)
1139 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
1140
1141 cru = container_of(media_entity_to_video_device(link->sink->entity),
1142 struct rzg2l_cru_dev, vdev);
1143 video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
1144
1145 if (fmt.format.width != cru->format.width ||
1146 fmt.format.height != cru->format.height ||
1147 fmt.format.field != cru->format.field ||
1148 !rzg2l_cru_ip_fmt_supports_mbus_code(video_fmt, fmt.format.code))
1149 return -EPIPE;
1150
1151 return 0;
1152 }
1153
1154 static const struct media_entity_operations rzg2l_cru_video_media_ops = {
1155 .link_validate = rzg2l_cru_video_link_validate,
1156 };
1157
rzg2l_cru_v4l2_init(struct rzg2l_cru_dev * cru)1158 static void rzg2l_cru_v4l2_init(struct rzg2l_cru_dev *cru)
1159 {
1160 struct video_device *vdev = &cru->vdev;
1161
1162 vdev->v4l2_dev = &cru->v4l2_dev;
1163 vdev->queue = &cru->queue;
1164 snprintf(vdev->name, sizeof(vdev->name), "CRU output");
1165 vdev->release = video_device_release_empty;
1166 vdev->lock = &cru->lock;
1167 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1168 vdev->device_caps |= V4L2_CAP_IO_MC;
1169 vdev->entity.ops = &rzg2l_cru_video_media_ops;
1170 vdev->fops = &rzg2l_cru_fops;
1171 vdev->ioctl_ops = &rzg2l_cru_ioctl_ops;
1172
1173 /* Set a default format */
1174 cru->format.pixelformat = RZG2L_CRU_DEFAULT_FORMAT;
1175 cru->format.width = RZG2L_CRU_DEFAULT_WIDTH;
1176 cru->format.height = RZG2L_CRU_DEFAULT_HEIGHT;
1177 cru->format.field = RZG2L_CRU_DEFAULT_FIELD;
1178 cru->format.colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
1179 rzg2l_cru_format_align(cru, &cru->format);
1180 }
1181
/*
 * Tear down in the reverse order of rzg2l_cru_video_register():
 * remove the media device before the video node it references.
 */
void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru)
{
	media_device_unregister(&cru->mdev);
	video_unregister_device(&cru->vdev);
}
1187
rzg2l_cru_video_register(struct rzg2l_cru_dev * cru)1188 int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru)
1189 {
1190 struct video_device *vdev = &cru->vdev;
1191 int ret;
1192
1193 if (video_is_registered(&cru->vdev)) {
1194 struct media_entity *entity;
1195
1196 entity = &cru->vdev.entity;
1197 if (!entity->graph_obj.mdev)
1198 entity->graph_obj.mdev = &cru->mdev;
1199 return 0;
1200 }
1201
1202 rzg2l_cru_v4l2_init(cru);
1203 video_set_drvdata(vdev, cru);
1204 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1205 if (ret) {
1206 dev_err(cru->dev, "Failed to register video device\n");
1207 return ret;
1208 }
1209
1210 ret = media_device_register(&cru->mdev);
1211 if (ret) {
1212 video_unregister_device(&cru->vdev);
1213 return ret;
1214 }
1215
1216 return 0;
1217 }
1218