1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * camss-vfe-gen1.c
4 *
5 * Qualcomm MSM Camera Subsystem - VFE Common functionality for Gen 1 versions of hw (4.1, 4.7..)
6 *
7 * Copyright (C) 2020 Linaro Ltd.
8 */
9
10 #include "camss.h"
11 #include "camss-vfe.h"
12 #include "camss-vfe-gen1.h"
13
14 /* Max number of frame drop updates per frame */
15 #define VFE_FRAME_DROP_UPDATES 2
16 #define VFE_NEXT_SOF_MS 500
17
vfe_gen1_halt(struct vfe_device * vfe)18 int vfe_gen1_halt(struct vfe_device *vfe)
19 {
20 unsigned long time;
21
22 reinit_completion(&vfe->halt_complete);
23
24 vfe->ops_gen1->halt_request(vfe);
25
26 time = wait_for_completion_timeout(&vfe->halt_complete,
27 msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
28 if (!time) {
29 dev_err(vfe->camss->dev, "VFE halt timeout\n");
30 return -EIO;
31 }
32
33 return 0;
34 }
35
/*
 * vfe_disable_output - Stop an output and quiesce its hardware path
 * @line: VFE line
 *
 * Waits for the next SOF and then for a reg update ack so the write
 * masters are disabled on a frame boundary, then tears down either the
 * RDI or the PIX hardware path for this line.
 *
 * Return 0 (SOF/reg-update timeouts are logged but not treated as fatal)
 */
static int vfe_disable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	const struct vfe_hw_ops *ops = vfe->res->hw_ops;
	unsigned long flags;
	unsigned long time;
	unsigned int i;

	/* Ask the SOF ISR to complete output->sof on the next start of frame */
	spin_lock_irqsave(&vfe->output_lock, flags);

	output->gen1.wait_sof = 1;
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	time = wait_for_completion_timeout(&output->sof, msecs_to_jiffies(VFE_NEXT_SOF_MS));
	if (!time)
		dev_err(vfe->camss->dev, "VFE sof timeout\n");

	/* Disable all write masters of this output, then request a register
	 * update and wait for the reg-update ISR to ack it, so the disable
	 * is latched by the hardware.
	 */
	spin_lock_irqsave(&vfe->output_lock, flags);
	for (i = 0; i < output->wm_num; i++)
		vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 0);

	ops->reg_update(vfe, line->id);
	output->wait_reg_update = 1;
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	time = wait_for_completion_timeout(&output->reg_update, msecs_to_jiffies(VFE_NEXT_SOF_MS));
	if (!time)
		dev_err(vfe->camss->dev, "VFE reg update timeout\n");

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (line->id != VFE_LINE_PIX) {
		/* RDI path: a single write master, disconnect it from the RDI */
		vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 0);
		vfe->ops_gen1->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
		vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
		vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 0);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
	} else {
		/* PIX path: tear down every write master and the camif pipeline */
		for (i = 0; i < output->wm_num; i++) {
			vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
			vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 0);
		}

		vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 0);
		vfe->ops_gen1->set_module_cfg(vfe, 0);
		vfe->ops_gen1->set_realign_cfg(vfe, line, 0);
		vfe->ops_gen1->set_xbar_cfg(vfe, output, 0);
		vfe->ops_gen1->set_camif_cmd(vfe, 0);

		spin_unlock_irqrestore(&vfe->output_lock, flags);

		/* May sleep, so must run with the spinlock released */
		vfe->ops_gen1->camif_wait_for_stop(vfe, vfe->camss->dev);
	}

	return 0;
}
93
94 /*
95 * vfe_gen1_disable - Disable streaming on VFE line
96 * @line: VFE line
97 *
98 * Return 0 on success or a negative error code otherwise
99 */
vfe_gen1_disable(struct vfe_line * line)100 int vfe_gen1_disable(struct vfe_line *line)
101 {
102 struct vfe_device *vfe = to_vfe(line);
103
104 vfe_disable_output(line);
105
106 vfe_put_output(line);
107
108 mutex_lock(&vfe->stream_lock);
109
110 if (vfe->stream_count == 1)
111 vfe->ops_gen1->bus_enable_wr_if(vfe, 0);
112
113 vfe->stream_count--;
114
115 mutex_unlock(&vfe->stream_lock);
116
117 return 0;
118 }
119
/*
 * vfe_output_init_addrs - Program initial ping/pong addresses for an output
 * @vfe: VFE device
 * @output: VFE output
 * @sync: when non-zero, reload each write master after programming
 * @line: VFE line (kept for interface symmetry with the update helpers)
 *
 * With no ping buffer the address is 0; with no pong buffer the ping
 * address is reused so the hardware keeps writing into the same buffer.
 */
static void vfe_output_init_addrs(struct vfe_device *vfe,
				  struct vfe_output *output, u8 sync,
				  struct vfe_line *line)
{
	unsigned int wm;

	output->gen1.active_buf = 0;

	for (wm = 0; wm < output->wm_num; wm++) {
		u32 ping = output->buf[0] ? output->buf[0]->addr[wm] : 0;
		u32 pong = output->buf[1] ? output->buf[1]->addr[wm] : ping;

		vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[wm], ping);
		vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[wm], pong);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[wm]);
	}
}
147
/*
 * vfe_output_frame_drop - Apply a frame drop pattern to all write masters
 * @vfe: VFE device
 * @output: VFE output
 * @drop_pattern: bitmask of frames to capture within the drop period
 */
static void vfe_output_frame_drop(struct vfe_device *vfe,
				  struct vfe_output *output,
				  u32 drop_pattern)
{
	struct vfe_line *line = container_of(output, struct vfe_line, output);
	unsigned int wm;
	u8 period;

	/* Toggle the update period so the new value takes effect next frame */
	output->drop_update_idx = (output->drop_update_idx + 1) % VFE_FRAME_DROP_UPDATES;
	period = VFE_FRAME_DROP_VAL + output->drop_update_idx;

	for (wm = 0; wm < output->wm_num; wm++) {
		vfe->ops_gen1->wm_set_framedrop_period(vfe, output->wm_idx[wm], period);
		vfe->ops_gen1->wm_set_framedrop_pattern(vfe, output->wm_idx[wm], drop_pattern);
	}

	vfe->res->hw_ops->reg_update(vfe, line->id);
}
167
/*
 * vfe_enable_output - Program and start one output (RDI or PIX path)
 * @line: VFE line
 *
 * Fetches up to two pending buffers, derives the initial output state and
 * frame drop pattern from them, programs the write masters and per-path
 * hardware, and issues a reg update to latch the configuration.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_enable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	const struct vfe_hw_ops *ops = vfe->res->hw_ops;
	struct media_pad *sensor_pad;
	unsigned long flags;
	unsigned int frame_skip = 0;
	unsigned int i;
	u16 ub_size;

	ub_size = vfe->ops_gen1->get_ub_size(vfe->id);
	if (!ub_size)
		return -EINVAL;

	/* Ask the sensor how many initial frames to skip, clamped so the
	 * skip fits in the frame drop period.
	 */
	sensor_pad = camss_find_sensor_pad(&line->subdev.entity);
	if (sensor_pad) {
		struct v4l2_subdev *subdev =
			media_entity_to_v4l2_subdev(sensor_pad->entity);

		v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
		/* Max frame skip is 29 frames */
		if (frame_skip > VFE_FRAME_DROP_VAL - 1)
			frame_skip = VFE_FRAME_DROP_VAL - 1;
	}

	spin_lock_irqsave(&vfe->output_lock, flags);

	ops->reg_update_clear(vfe, line->id);

	if (output->state > VFE_OUTPUT_RESERVED) {
		dev_err(vfe->camss->dev, "Output is not in reserved state %d\n", output->state);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return -EINVAL;
	}
	output->state = VFE_OUTPUT_IDLE;

	/* Grab up to two queued buffers for the ping/pong slots */
	output->buf[0] = vfe_buf_get_pending(output);
	output->buf[1] = vfe_buf_get_pending(output);

	/* Keep slot 0 occupied first so a single buffer lands on ping */
	if (!output->buf[0] && output->buf[1]) {
		output->buf[0] = output->buf[1];
		output->buf[1] = NULL;
	}

	if (output->buf[0])
		output->state = VFE_OUTPUT_SINGLE;

	if (output->buf[1])
		output->state = VFE_OUTPUT_CONTINUOUS;

	/* One capture bit per available buffer, shifted past the skip frames */
	switch (output->state) {
	case VFE_OUTPUT_SINGLE:
		vfe_output_frame_drop(vfe, output, 1 << frame_skip);
		break;
	case VFE_OUTPUT_CONTINUOUS:
		vfe_output_frame_drop(vfe, output, 3 << frame_skip);
		break;
	default:
		vfe_output_frame_drop(vfe, output, 0);
		break;
	}

	output->sequence = 0;
	output->gen1.wait_sof = 0;
	output->wait_reg_update = 0;
	reinit_completion(&output->sof);
	reinit_completion(&output->reg_update);

	vfe_output_init_addrs(vfe, output, 0, line);

	if (line->id != VFE_LINE_PIX) {
		/* RDI path: one frame-based write master connected to the RDI */
		vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
		vfe->ops_gen1->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
		vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[0]);
		vfe->ops_gen1->set_rdi_cid(vfe, line->id, 0);
		vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[0],
					     (ub_size + 1) * output->wm_idx[0], ub_size);
		vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->wm_enable(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[0]);
	} else {
		/* PIX path: split the ub between the line-based write masters
		 * and bring up the full camif pipeline.
		 */
		ub_size /= output->wm_num;
		for (i = 0; i < output->wm_num; i++) {
			vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 1);
			vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[i]);
			vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[i],
						     (ub_size + 1) * output->wm_idx[i], ub_size);
			vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i],
						     &line->video_out.active_fmt.fmt.pix_mp, i, 1);
			vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 1);
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
		}
		vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 1);
		vfe->ops_gen1->set_module_cfg(vfe, 1);
		vfe->ops_gen1->set_camif_cfg(vfe, line);
		vfe->ops_gen1->set_realign_cfg(vfe, line, 1);
		vfe->ops_gen1->set_xbar_cfg(vfe, output, 1);
		vfe->ops_gen1->set_demux_cfg(vfe, line);
		vfe->ops_gen1->set_scale_cfg(vfe, line);
		vfe->ops_gen1->set_crop_cfg(vfe, line);
		vfe->ops_gen1->set_clamp_cfg(vfe);
		vfe->ops_gen1->set_camif_cmd(vfe, 1);
	}

	ops->reg_update(vfe, line->id);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}
280
vfe_get_output(struct vfe_line * line)281 static int vfe_get_output(struct vfe_line *line)
282 {
283 struct vfe_device *vfe = to_vfe(line);
284 struct vfe_output *output;
285 struct v4l2_format *f = &line->video_out.active_fmt;
286 unsigned long flags;
287 int i;
288 int wm_idx;
289
290 spin_lock_irqsave(&vfe->output_lock, flags);
291
292 output = &line->output;
293 if (output->state > VFE_OUTPUT_RESERVED) {
294 dev_err(vfe->camss->dev, "Output is running\n");
295 goto error;
296 }
297 output->state = VFE_OUTPUT_RESERVED;
298
299 output->gen1.active_buf = 0;
300
301 switch (f->fmt.pix_mp.pixelformat) {
302 case V4L2_PIX_FMT_NV12:
303 case V4L2_PIX_FMT_NV21:
304 case V4L2_PIX_FMT_NV16:
305 case V4L2_PIX_FMT_NV61:
306 output->wm_num = 2;
307 break;
308 default:
309 output->wm_num = 1;
310 break;
311 }
312
313 for (i = 0; i < output->wm_num; i++) {
314 wm_idx = vfe_reserve_wm(vfe, line->id);
315 if (wm_idx < 0) {
316 dev_err(vfe->camss->dev, "Can not reserve wm\n");
317 goto error_get_wm;
318 }
319 output->wm_idx[i] = wm_idx;
320 }
321
322 output->drop_update_idx = 0;
323
324 spin_unlock_irqrestore(&vfe->output_lock, flags);
325
326 return 0;
327
328 error_get_wm:
329 for (i--; i >= 0; i--)
330 vfe_release_wm(vfe, output->wm_idx[i]);
331 output->state = VFE_OUTPUT_OFF;
332 error:
333 spin_unlock_irqrestore(&vfe->output_lock, flags);
334
335 return -EINVAL;
336 }
337
vfe_gen1_enable(struct vfe_line * line)338 int vfe_gen1_enable(struct vfe_line *line)
339 {
340 struct vfe_device *vfe = to_vfe(line);
341 int ret;
342
343 mutex_lock(&vfe->stream_lock);
344
345 if (!vfe->stream_count) {
346 vfe->ops_gen1->enable_irq_common(vfe);
347 vfe->ops_gen1->bus_enable_wr_if(vfe, 1);
348 vfe->ops_gen1->set_qos(vfe);
349 vfe->ops_gen1->set_ds(vfe);
350 }
351
352 vfe->stream_count++;
353
354 mutex_unlock(&vfe->stream_lock);
355
356 ret = vfe_get_output(line);
357 if (ret < 0)
358 goto error_get_output;
359
360 ret = vfe_enable_output(line);
361 if (ret < 0)
362 goto error_enable_output;
363
364 vfe->was_streaming = 1;
365
366 return 0;
367
368 error_enable_output:
369 vfe_put_output(line);
370
371 error_get_output:
372 mutex_lock(&vfe->stream_lock);
373
374 if (vfe->stream_count == 1)
375 vfe->ops_gen1->bus_enable_wr_if(vfe, 0);
376
377 vfe->stream_count--;
378
379 mutex_unlock(&vfe->stream_lock);
380
381 return ret;
382 }
383
/*
 * vfe_output_update_ping_addr - Point the ping slot at buf[0] (or 0 if none)
 * @vfe: VFE device
 * @output: VFE output
 * @sync: when non-zero, reload each write master after programming
 * @line: VFE line (kept for interface symmetry)
 */
static void vfe_output_update_ping_addr(struct vfe_device *vfe,
					struct vfe_output *output, u8 sync,
					struct vfe_line *line)
{
	unsigned int wm;

	for (wm = 0; wm < output->wm_num; wm++) {
		u32 addr = output->buf[0] ? output->buf[0]->addr[wm] : 0;

		vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[wm], addr);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[wm]);
	}
}
402
/*
 * vfe_output_update_pong_addr - Point the pong slot at buf[1] (or 0 if none)
 * @vfe: VFE device
 * @output: VFE output
 * @sync: when non-zero, reload each write master after programming
 * @line: VFE line (kept for interface symmetry)
 */
static void vfe_output_update_pong_addr(struct vfe_device *vfe,
					struct vfe_output *output, u8 sync,
					struct vfe_line *line)
{
	unsigned int wm;

	for (wm = 0; wm < output->wm_num; wm++) {
		u32 addr = output->buf[1] ? output->buf[1]->addr[wm] : 0;

		vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[wm], addr);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[wm]);
	}
}
421
vfe_buf_update_wm_on_next(struct vfe_device * vfe,struct vfe_output * output)422 static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
423 struct vfe_output *output)
424 {
425 switch (output->state) {
426 case VFE_OUTPUT_CONTINUOUS:
427 vfe_output_frame_drop(vfe, output, 3);
428 break;
429 case VFE_OUTPUT_SINGLE:
430 default:
431 dev_err_ratelimited(vfe->camss->dev,
432 "Next buf in wrong state! %d\n",
433 output->state);
434 break;
435 }
436 }
437
vfe_buf_update_wm_on_last(struct vfe_device * vfe,struct vfe_output * output)438 static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
439 struct vfe_output *output)
440 {
441 switch (output->state) {
442 case VFE_OUTPUT_CONTINUOUS:
443 output->state = VFE_OUTPUT_SINGLE;
444 vfe_output_frame_drop(vfe, output, 1);
445 break;
446 case VFE_OUTPUT_SINGLE:
447 output->state = VFE_OUTPUT_STOPPING;
448 vfe_output_frame_drop(vfe, output, 0);
449 break;
450 default:
451 dev_err_ratelimited(vfe->camss->dev,
452 "Last buff in wrong state! %d\n",
453 output->state);
454 break;
455 }
456 }
457
/*
 * vfe_buf_update_wm_on_new - Feed a newly queued buffer to the hardware
 * @vfe: VFE device
 * @output: VFE output
 * @new_buf: buffer just queued by userspace
 * @line: VFE line
 *
 * Depending on the output state the buffer is either programmed into the
 * free ping/pong slot immediately or placed on the dma pending queue.
 * Called with vfe->output_lock held.
 */
static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
				     struct vfe_output *output,
				     struct camss_buffer *new_buf,
				     struct vfe_line *line)
{
	int inactive_idx;

	switch (output->state) {
	case VFE_OUTPUT_SINGLE:
		/* Try to park the new buffer in the slot the hw is not using */
		inactive_idx = !output->gen1.active_buf;

		if (!output->buf[inactive_idx]) {
			output->buf[inactive_idx] = new_buf;

			/* Slot 1 is pong, slot 0 is ping */
			if (inactive_idx)
				vfe_output_update_pong_addr(vfe, output, 0, line);
			else
				vfe_output_update_ping_addr(vfe, output, 0, line);

			vfe_output_frame_drop(vfe, output, 3);
			output->state = VFE_OUTPUT_CONTINUOUS;
		} else {
			vfe_buf_add_pending(output, new_buf);
			dev_err_ratelimited(vfe->camss->dev,
					    "Inactive buffer is busy\n");
		}
		break;

	case VFE_OUTPUT_IDLE:
		if (!output->buf[0]) {
			output->buf[0] = new_buf;

			vfe_output_init_addrs(vfe, output, 1, line);
			vfe_output_frame_drop(vfe, output, 1);

			output->state = VFE_OUTPUT_SINGLE;
		} else {
			/* Idle should imply both slots empty; queue and warn */
			vfe_buf_add_pending(output, new_buf);
			dev_err_ratelimited(vfe->camss->dev,
					    "Output idle with buffer set!\n");
		}
		break;

	case VFE_OUTPUT_CONTINUOUS:
	default:
		/* Both slots in use (or stopping) - queue for later */
		vfe_buf_add_pending(output, new_buf);
		break;
	}
}
507
/*
 * vfe_isr_halt_ack - Process halt ack
 * @vfe: VFE Device
 *
 * Wakes up the waiter in vfe_gen1_halt(), then clears the halt request
 * in hardware so a subsequent halt can be issued.
 */
static void vfe_isr_halt_ack(struct vfe_device *vfe)
{
	complete(&vfe->halt_complete);
	vfe->ops_gen1->halt_clear(vfe);
}
517
518 /*
519 * vfe_isr_sof - Process start of frame interrupt
520 * @vfe: VFE Device
521 * @line_id: VFE line
522 */
vfe_isr_sof(struct vfe_device * vfe,enum vfe_line_id line_id)523 static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
524 {
525 struct vfe_output *output;
526 unsigned long flags;
527
528 spin_lock_irqsave(&vfe->output_lock, flags);
529 output = &vfe->line[line_id].output;
530 if (output->gen1.wait_sof) {
531 output->gen1.wait_sof = 0;
532 complete(&output->sof);
533 }
534 spin_unlock_irqrestore(&vfe->output_lock, flags);
535 }
536
/*
 * vfe_isr_reg_update - Process reg update interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 *
 * Either acks a waiter (vfe_disable_output) or, when the output was
 * stopping, releases the last buffer and restarts capture from the dma
 * pending queue if buffers arrived in the meantime.
 */
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	struct vfe_output *output;
	struct vfe_line *line = &vfe->line[line_id];
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);
	vfe->res->hw_ops->reg_update_clear(vfe, line_id);

	output = &line->output;

	/* A waiter takes priority - ack it and do nothing else */
	if (output->wait_reg_update) {
		output->wait_reg_update = 0;
		complete(&output->reg_update);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return;
	}

	if (output->state == VFE_OUTPUT_STOPPING) {
		/* Release last buffer when hw is idle */
		if (output->last_buffer) {
			vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
					VB2_BUF_STATE_DONE);
			output->last_buffer = NULL;
		}
		output->state = VFE_OUTPUT_IDLE;

		/* Buffers received in stopping state are queued in */
		/* dma pending queue, start next capture here */

		output->buf[0] = vfe_buf_get_pending(output);
		output->buf[1] = vfe_buf_get_pending(output);

		/* Keep slot 0 occupied first so a single buffer lands on ping */
		if (!output->buf[0] && output->buf[1]) {
			output->buf[0] = output->buf[1];
			output->buf[1] = NULL;
		}

		if (output->buf[0])
			output->state = VFE_OUTPUT_SINGLE;

		if (output->buf[1])
			output->state = VFE_OUTPUT_CONTINUOUS;

		/* Frame drop pattern depends on how many buffers we now hold */
		switch (output->state) {
		case VFE_OUTPUT_SINGLE:
			vfe_output_frame_drop(vfe, output, 2);
			break;
		case VFE_OUTPUT_CONTINUOUS:
			vfe_output_frame_drop(vfe, output, 3);
			break;
		default:
			vfe_output_frame_drop(vfe, output, 0);
			break;
		}

		vfe_output_init_addrs(vfe, output, 1, &vfe->line[line_id]);
	}

	spin_unlock_irqrestore(&vfe->output_lock, flags);
}
603
/*
 * vfe_isr_wm_done - Process write master done interrupt
 * @vfe: VFE Device
 * @wm: Write master id
 *
 * Completes the buffer the hardware just finished (the slot opposite the
 * now-active ping/pong index), advances to the next pending buffer or
 * winds the output down, and reprograms the freed slot's addresses.
 */
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
	struct camss_buffer *ready_buf;
	struct vfe_output *output;
	dma_addr_t *new_addr;
	unsigned long flags;
	u32 active_index;
	u64 ts = ktime_get_ns();
	unsigned int i;

	active_index = vfe->ops_gen1->wm_get_ping_pong_status(vfe, wm);

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Received wm done for unmapped index\n");
		goto out_unlock;
	}
	output = &vfe->line[vfe->wm_output_map[wm]].output;

	/* FIXME: the "&& 0" makes this mismatch check dead code - the error
	 * is never reported and the branch never taken. Presumably disabled
	 * on purpose at some point; confirm before re-enabling.
	 */
	if (output->gen1.active_buf == active_index && 0) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Active buffer mismatch!\n");
		goto out_unlock;
	}
	output->gen1.active_buf = active_index;

	/* The buffer the hw just finished sits in the non-active slot */
	ready_buf = output->buf[!active_index];
	if (!ready_buf) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Missing ready buf %d %d!\n",
				    !active_index, output->state);
		goto out_unlock;
	}

	ready_buf->vb.vb2_buf.timestamp = ts;
	ready_buf->vb.sequence = output->sequence++;

	/* Get next buffer */
	output->buf[!active_index] = vfe_buf_get_pending(output);
	if (!output->buf[!active_index]) {
		/* No next buffer - set same address */
		new_addr = ready_buf->addr;
		vfe_buf_update_wm_on_last(vfe, output);
	} else {
		new_addr = output->buf[!active_index]->addr;
		vfe_buf_update_wm_on_next(vfe, output);
	}

	/* Repoint the freed slot (ping if hw moved to pong, and vice versa) */
	if (active_index)
		for (i = 0; i < output->wm_num; i++)
			vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], new_addr[i]);
	else
		for (i = 0; i < output->wm_num; i++)
			vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], new_addr[i]);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	/* NOTE(review): output->state is read after dropping output_lock;
	 * a racing state change could misclassify the buffer - verify.
	 */
	if (output->state == VFE_OUTPUT_STOPPING)
		output->last_buffer = ready_buf;
	else
		vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);

	return;

out_unlock:
	spin_unlock_irqrestore(&vfe->output_lock, flags);
}
678
679 /*
680 * vfe_queue_buffer - Add empty buffer
681 * @vid: Video device structure
682 * @buf: Buffer to be enqueued
683 *
684 * Add an empty buffer - depending on the current number of buffers it will be
685 * put in pending buffer queue or directly given to the hardware to be filled.
686 *
687 * Return 0 on success or a negative error code otherwise
688 */
vfe_queue_buffer(struct camss_video * vid,struct camss_buffer * buf)689 static int vfe_queue_buffer(struct camss_video *vid, struct camss_buffer *buf)
690 {
691 struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
692 struct vfe_device *vfe = to_vfe(line);
693 struct vfe_output *output;
694 unsigned long flags;
695
696 output = &line->output;
697
698 spin_lock_irqsave(&vfe->output_lock, flags);
699
700 vfe_buf_update_wm_on_new(vfe, output, buf, line);
701
702 spin_unlock_irqrestore(&vfe->output_lock, flags);
703
704 return 0;
705 }
706
/* Words per line: width * M / N bytes, rounded up to whole words */
#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))

/*
 * vfe_word_per_line - Calculate number of bus words per line of a frame
 * @format: V4L2 pixel format (fourcc)
 * @width: line width in pixels
 *
 * Return the word count for the supported YUV formats, 0 otherwise
 */
int vfe_word_per_line(u32 format, u32 width)
{
	switch (format) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		/* Semiplanar NV formats: one byte per pixel on the Y plane */
		return CALC_WORD(width, 1, 8);
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
		/* Packed YUV 4:2:2: two bytes per pixel */
		return CALC_WORD(width, 2, 8);
	default:
		return 0;
	}
}
730
/* ISR dispatch table for gen1 VFE hardware; handlers not defined in this
 * file (reset_ack, comp_done) are shared helpers declared in camss-vfe.h.
 */
const struct vfe_isr_ops vfe_isr_ops_gen1 = {
	.reset_ack = vfe_isr_reset_ack,
	.halt_ack = vfe_isr_halt_ack,
	.reg_update = vfe_isr_reg_update,
	.sof = vfe_isr_sof,
	.comp_done = vfe_isr_comp_done,
	.wm_done = vfe_isr_wm_done,
};
739
/* Video node operations for gen1 VFE; vfe_flush_buffers is the shared
 * implementation declared in camss-vfe.h.
 */
const struct camss_video_ops vfe_video_ops_gen1 = {
	.queue_buffer = vfe_queue_buffer,
	.flush_buffers = vfe_flush_buffers,
};
744