// SPDX-License-Identifier: GPL-2.0
/*
 * camss-vfe-170.c
 *
 * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v170
 *
 * Copyright (C) 2020-2021 Linaro Ltd.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#include "camss.h"
#include "camss-vfe.h"

#define VFE_GLOBAL_RESET_CMD (0x018)
#define GLOBAL_RESET_CMD_CORE BIT(0)
#define GLOBAL_RESET_CMD_CAMIF BIT(1)
#define GLOBAL_RESET_CMD_BUS BIT(2)
#define GLOBAL_RESET_CMD_BUS_BDG BIT(3)
#define GLOBAL_RESET_CMD_REGISTER BIT(4)
#define GLOBAL_RESET_CMD_PM BIT(5)
#define GLOBAL_RESET_CMD_BUS_MISR BIT(6)
#define GLOBAL_RESET_CMD_TESTGEN BIT(7)
#define GLOBAL_RESET_CMD_DSP BIT(8)
#define GLOBAL_RESET_CMD_IDLE_CGC BIT(9)
#define GLOBAL_RESET_CMD_RDI0 BIT(10)
#define GLOBAL_RESET_CMD_RDI1 BIT(11)
#define GLOBAL_RESET_CMD_RDI2 BIT(12)
#define GLOBAL_RESET_CMD_RDI3 BIT(13)
#define GLOBAL_RESET_CMD_VFE_DOMAIN BIT(30)
#define GLOBAL_RESET_CMD_RESET_BYPASS BIT(31)

#define VFE_CORE_CFG (0x050)
#define CFG_PIXEL_PATTERN_YCBYCR (0x4)
#define CFG_PIXEL_PATTERN_YCRYCB (0x5)
#define CFG_PIXEL_PATTERN_CBYCRY (0x6)
#define CFG_PIXEL_PATTERN_CRYCBY (0x7)
#define CFG_COMPOSITE_REG_UPDATE_EN BIT(4)

#define VFE_IRQ_CMD (0x058)
#define CMD_GLOBAL_CLEAR BIT(0)

#define VFE_IRQ_MASK_0 (0x05c)
#define MASK_0_CAMIF_SOF BIT(0)
#define MASK_0_CAMIF_EOF BIT(1)
#define MASK_0_RDI_REG_UPDATE(n) BIT((n) + 5)
#define MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define MASK_0_RESET_ACK BIT(31)

#define VFE_IRQ_MASK_1 (0x060)
#define MASK_1_CAMIF_ERROR BIT(0)
#define MASK_1_VIOLATION BIT(7)
#define MASK_1_BUS_BDG_HALT_ACK BIT(8)
#define MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
#define MASK_1_RDI_SOF(n) BIT((n) + 29)

#define VFE_IRQ_CLEAR_0 (0x064)
#define VFE_IRQ_CLEAR_1 (0x068)

#define VFE_IRQ_STATUS_0 (0x06c)
#define STATUS_0_CAMIF_SOF BIT(0)
#define STATUS_0_RDI_REG_UPDATE(n) BIT((n) + 5)
#define STATUS_0_IMAGE_MASTER_PING_PONG(n) BIT((n) + 8)
#define STATUS_0_IMAGE_COMPOSITE_DONE(n) BIT((n) + 25)
#define STATUS_0_RESET_ACK BIT(31)

#define VFE_IRQ_STATUS_1 (0x070)
#define STATUS_1_VIOLATION BIT(7)
#define STATUS_1_BUS_BDG_HALT_ACK BIT(8)
#define STATUS_1_RDI_SOF(n) BIT((n) + 27)

#define VFE_VIOLATION_STATUS (0x07c)

#define VFE_CAMIF_CMD (0x478)
#define CMD_CLEAR_CAMIF_STATUS BIT(2)

#define VFE_CAMIF_CFG (0x47c)
#define CFG_VSYNC_SYNC_EDGE (0)
#define VSYNC_ACTIVE_HIGH (0)
#define VSYNC_ACTIVE_LOW (1)
#define CFG_HSYNC_SYNC_EDGE (1)
#define HSYNC_ACTIVE_HIGH (0)
#define HSYNC_ACTIVE_LOW (1)
#define CFG_VFE_SUBSAMPLE_ENABLE BIT(4)
#define CFG_BUS_SUBSAMPLE_ENABLE BIT(5)
#define CFG_VFE_OUTPUT_EN BIT(6)
#define CFG_BUS_OUTPUT_EN BIT(7)
#define CFG_BINNING_EN BIT(9)
#define CFG_FRAME_BASED_EN BIT(10)
#define CFG_RAW_CROP_EN BIT(22)

#define VFE_REG_UPDATE_CMD (0x4ac)
#define REG_UPDATE_RDI(n) BIT(1 + (n))

#define VFE_BUS_IRQ_MASK(n) (0x2044 + (n) * 4)
#define VFE_BUS_IRQ_CLEAR(n) (0x2050 + (n) * 4)
#define VFE_BUS_IRQ_STATUS(n) (0x205c + (n) * 4)
#define STATUS0_COMP_RESET_DONE BIT(0)
#define STATUS0_COMP_REG_UPDATE0_DONE BIT(1)
#define STATUS0_COMP_REG_UPDATE1_DONE BIT(2)
#define STATUS0_COMP_REG_UPDATE2_DONE BIT(3)
#define STATUS0_COMP_REG_UPDATE3_DONE BIT(4)
#define STATUS0_COMP_REG_UPDATE_DONE(n) BIT((n) + 1)
#define STATUS0_COMP0_BUF_DONE BIT(5)
#define STATUS0_COMP1_BUF_DONE BIT(6)
#define STATUS0_COMP2_BUF_DONE BIT(7)
#define STATUS0_COMP3_BUF_DONE BIT(8)
#define STATUS0_COMP4_BUF_DONE BIT(9)
#define STATUS0_COMP5_BUF_DONE BIT(10)
#define STATUS0_COMP_BUF_DONE(n) BIT((n) + 5)
#define STATUS0_COMP_ERROR BIT(11)
#define STATUS0_COMP_OVERWRITE BIT(12)
#define STATUS0_OVERFLOW BIT(13)
#define STATUS0_VIOLATION BIT(14)
/* WM_CLIENT_BUF_DONE defined for buffers 0:19 */
#define STATUS1_WM_CLIENT_BUF_DONE(n) BIT(n)
#define STATUS1_EARLY_DONE BIT(24)
#define STATUS2_DUAL_COMP0_BUF_DONE BIT(0)
#define STATUS2_DUAL_COMP1_BUF_DONE BIT(1)
#define STATUS2_DUAL_COMP2_BUF_DONE BIT(2)
#define STATUS2_DUAL_COMP3_BUF_DONE BIT(3)
#define STATUS2_DUAL_COMP4_BUF_DONE BIT(4)
#define STATUS2_DUAL_COMP5_BUF_DONE BIT(5)
#define STATUS2_DUAL_COMP_BUF_DONE(n) BIT(n)
#define STATUS2_DUAL_COMP_ERROR BIT(6)
#define STATUS2_DUAL_COMP_OVERWRITE BIT(7)

#define VFE_BUS_IRQ_CLEAR_GLOBAL (0x2068)

#define VFE_BUS_WM_DEBUG_STATUS_CFG (0x226c)
#define DEBUG_STATUS_CFG_STATUS0(n) BIT(n)
#define DEBUG_STATUS_CFG_STATUS1(n) BIT(8 + (n))

#define VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER (0x2080)

#define VFE_BUS_WM_ADDR_SYNC_NO_SYNC (0x2084)
#define BUS_VER2_MAX_CLIENTS (24)
#define WM_ADDR_NO_SYNC_DEFAULT_VAL \
        ((1 << BUS_VER2_MAX_CLIENTS) - 1)

#define VFE_BUS_WM_CGC_OVERRIDE (0x200c)
#define WM_CGC_OVERRIDE_ALL (0xFFFFF)

#define VFE_BUS_WM_TEST_BUS_CTRL (0x211c)

#define VFE_BUS_WM_STATUS0(n) (0x2200 + (n) * 0x100)
#define VFE_BUS_WM_STATUS1(n) (0x2204 + (n) * 0x100)
#define VFE_BUS_WM_CFG(n) (0x2208 + (n) * 0x100)
#define WM_CFG_EN (0)
#define WM_CFG_MODE (1)
#define MODE_QCOM_PLAIN (0)
#define MODE_MIPI_RAW (1)
#define WM_CFG_VIRTUALFRAME (2)
#define VFE_BUS_WM_HEADER_ADDR(n) (0x220c + (n) * 0x100)
#define VFE_BUS_WM_HEADER_CFG(n) (0x2210 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_ADDR(n) (0x2214 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_ADDR_OFFSET(n) (0x2218 + (n) * 0x100)
#define VFE_BUS_WM_BUFFER_WIDTH_CFG(n) (0x221c + (n) * 0x100)
#define WM_BUFFER_DEFAULT_WIDTH (0xFF01)

#define VFE_BUS_WM_BUFFER_HEIGHT_CFG(n) (0x2220 + (n) * 0x100)
#define VFE_BUS_WM_PACKER_CFG(n) (0x2224 + (n) * 0x100)

#define VFE_BUS_WM_STRIDE(n) (0x2228 + (n) * 0x100)
#define WM_STRIDE_DEFAULT_STRIDE (0xFF01)

#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (0x2248 + (n) * 0x100)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (0x224c + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (0x2250 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (0x2254 + (n) * 0x100)
#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100)

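/*
 * vfe_reg_set - Set bits in a VFE register (read-modify-write)
 * @vfe: VFE device
 * @reg: Register offset
 * @set_bits: Bits to set
 */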
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
        u32 bits = readl_relaxed(vfe->base + reg);

        writel_relaxed(bits | set_bits, vfe->base + reg);
}

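/*
 * vfe_global_reset - Trigger global reset on the VFE module
 * @vfe: VFE device
 *
 * Completion of the reset is signalled by the RESET_ACK interrupt,
 * so only that interrupt is unmasked before issuing the command.
 */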
static void vfe_global_reset(struct vfe_device *vfe)
{
        u32 reset_bits = GLOBAL_RESET_CMD_CORE |
                         GLOBAL_RESET_CMD_CAMIF |
                         GLOBAL_RESET_CMD_BUS |
                         GLOBAL_RESET_CMD_BUS_BDG |
                         GLOBAL_RESET_CMD_REGISTER |
                         GLOBAL_RESET_CMD_TESTGEN |
                         GLOBAL_RESET_CMD_DSP |
                         GLOBAL_RESET_CMD_IDLE_CGC |
                         GLOBAL_RESET_CMD_RDI0 |
                         GLOBAL_RESET_CMD_RDI1 |
                         GLOBAL_RESET_CMD_RDI2 |
                         GLOBAL_RESET_CMD_RDI3;

        writel_relaxed(BIT(31), vfe->base + VFE_IRQ_MASK_0);

        /* Make sure IRQ mask has been written before resetting */
        wmb();

        writel_relaxed(reset_bits, vfe->base + VFE_GLOBAL_RESET_CMD);
}

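/*
 * vfe_wm_start - Configure and enable a write master in MIPI raw mode
 * @vfe: VFE device
 * @wm: Write master id
 * @line: VFE line
 */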
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
        u32 val;

        /* Set debug registers */
        val = DEBUG_STATUS_CFG_STATUS0(1) |
              DEBUG_STATUS_CFG_STATUS0(7);
        writel_relaxed(val, vfe->base + VFE_BUS_WM_DEBUG_STATUS_CFG);

        /* BUS_WM_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
        writel_relaxed(0, vfe->base + VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER);

        /* no clock gating at bus input */
        val = WM_CGC_OVERRIDE_ALL;
        writel_relaxed(val, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);

        writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);

        /* if addr_no_sync has default value then config the addr no sync reg */
        val = WM_ADDR_NO_SYNC_DEFAULT_VAL;
        writel_relaxed(val, vfe->base + VFE_BUS_WM_ADDR_SYNC_NO_SYNC);

        writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm));

        val = WM_BUFFER_DEFAULT_WIDTH;
        writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_WIDTH_CFG(wm));

        val = 0;
        writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_HEIGHT_CFG(wm));

        val = 0;
        writel_relaxed(val, vfe->base + VFE_BUS_WM_PACKER_CFG(wm)); // XXX 1 for PLAIN8?

        /* Configure stride for RDIs */
        val = WM_STRIDE_DEFAULT_STRIDE;
        writel_relaxed(val, vfe->base + VFE_BUS_WM_STRIDE(wm));

        /* Enable WM */
        val = 1 << WM_CFG_EN |
              MODE_MIPI_RAW << WM_CFG_MODE;
        writel_relaxed(val, vfe->base + VFE_BUS_WM_CFG(wm));
}

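/*
 * vfe_wm_stop - Disable a write master
 * @vfe: VFE device
 * @wm: Write master id
 */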
static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
{
        /* Disable WM */
        writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm));
}

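/*
 * vfe_wm_update - Set the output buffer address and frame increment
 * @vfe: VFE device
 * @wm: Write master id
 * @addr: DMA address of the output buffer
 * @line: VFE line
 */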
static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
                          struct vfe_line *line)
{
        struct v4l2_pix_format_mplane *pix =
                &line->video_out.active_fmt.fmt.pix_mp;
        u32 stride = pix->plane_fmt[0].bytesperline;

        writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
        writel_relaxed(stride * pix->height, vfe->base + VFE_BUS_WM_FRAME_INC(wm));
}

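/*
 * vfe_reg_update - Request a register update for a VFE line
 * @vfe: VFE device
 * @line_id: VFE line
 */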
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
        vfe->reg_update |= REG_UPDATE_RDI(line_id);

        /* Enforce ordering between previous reg writes and reg update */
        wmb();

        writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD);

        /* Enforce ordering between reg update and subsequent reg writes */
        wmb();
}

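/*
 * vfe_reg_update_clear - Clear the pending register update flag for a VFE line
 * @vfe: VFE device
 * @line_id: VFE line
 */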
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
                                        enum vfe_line_id line_id)
{
        vfe->reg_update &= ~REG_UPDATE_RDI(line_id);
}

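/*
 * vfe_enable_irq_common - Enable VFE top and bus interrupts
 * @vfe: VFE device
 */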
static void vfe_enable_irq_common(struct vfe_device *vfe)
{
        vfe_reg_set(vfe, VFE_IRQ_MASK_0, ~0u);
        vfe_reg_set(vfe, VFE_IRQ_MASK_1, ~0u);

        writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(0));
        writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(1));
        writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(2));
}

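/*
 * vfe_isr_halt_ack - Process halt ack interrupt
 * @vfe: VFE device
 */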
static void vfe_isr_halt_ack(struct vfe_device *vfe)
{
        complete(&vfe->halt_complete);
}

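/*
 * vfe_isr_read - Read and clear the VFE interrupt status registers
 * @vfe: VFE device
 * @status0: Value read from VFE_IRQ_STATUS_0
 * @status1: Value read from VFE_IRQ_STATUS_1
 */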
static void vfe_isr_read(struct vfe_device *vfe, u32 *status0, u32 *status1)
{
        *status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
        *status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1);

        writel_relaxed(*status0, vfe->base + VFE_IRQ_CLEAR_0);
        writel_relaxed(*status1, vfe->base + VFE_IRQ_CLEAR_1);

        /* Enforce ordering between IRQ Clear and Global IRQ Clear */
        wmb();
        writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
}

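/*
 * vfe_violation_read - Read and log the VFE violation status
 * @vfe: VFE device
 */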
static void vfe_violation_read(struct vfe_device *vfe)
{
        u32 violation = readl_relaxed(vfe->base + VFE_VIOLATION_STATUS);

        pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
}

/*
 * vfe_isr - VFE module interrupt handler
 * @irq: Interrupt line
 * @dev: VFE device
 *
 * Return IRQ_HANDLED on success
 */
static irqreturn_t vfe_isr(int irq, void *dev)
{
        struct vfe_device *vfe = dev;
        u32 status0, status1, vfe_bus_status[VFE_LINE_NUM_MAX];
        int i, wm;

        status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
        status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1);

        writel_relaxed(status0, vfe->base + VFE_IRQ_CLEAR_0);
        writel_relaxed(status1, vfe->base + VFE_IRQ_CLEAR_1);

        for (i = VFE_LINE_RDI0; i < vfe->res->line_num; i++) {
                vfe_bus_status[i] = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(i));
                writel_relaxed(vfe_bus_status[i], vfe->base + VFE_BUS_IRQ_CLEAR(i));
        }

        /* Enforce ordering between IRQ reading and interpretation */
        wmb();

        writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
        writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);

        if (status0 & STATUS_0_RESET_ACK)
                vfe->isr_ops.reset_ack(vfe);

        for (i = VFE_LINE_RDI0; i < vfe->res->line_num; i++)
                if (status0 & STATUS_0_RDI_REG_UPDATE(i))
                        vfe->isr_ops.reg_update(vfe, i);

        for (i = VFE_LINE_RDI0; i < vfe->res->line_num; i++)
                if (status0 & STATUS_1_RDI_SOF(i))
                        vfe->isr_ops.sof(vfe, i);

        for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
                if (vfe_bus_status[0] & STATUS0_COMP_BUF_DONE(i))
                        vfe->isr_ops.comp_done(vfe, i);

        for (wm = 0; wm < MSM_VFE_IMAGE_MASTERS_NUM; wm++)
                if (status0 & BIT(9))
                        if (vfe_bus_status[1] & STATUS1_WM_CLIENT_BUF_DONE(wm))
                                vfe->isr_ops.wm_done(vfe, wm);

        return IRQ_HANDLED;
}

/*
 * vfe_halt - Trigger halt on VFE module and wait to complete
 * @vfe: VFE device
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_halt(struct vfe_device *vfe)
{
        /* rely on vfe_disable_output() to stop the VFE */
        return 0;
}

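/*
 * vfe_get_output - Reserve a write master for the VFE line output
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */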
static int vfe_get_output(struct vfe_line *line)
{
        struct vfe_device *vfe = to_vfe(line);
        struct vfe_output *output;
        unsigned long flags;
        int wm_idx;

        spin_lock_irqsave(&vfe->output_lock, flags);

        output = &line->output;
        if (output->state > VFE_OUTPUT_RESERVED) {
                dev_err(vfe->camss->dev, "Output is running\n");
                goto error;
        }

        output->wm_num = 1;

        wm_idx = vfe_reserve_wm(vfe, line->id);
        if (wm_idx < 0) {
                dev_err(vfe->camss->dev, "Can not reserve wm\n");
                goto error_get_wm;
        }
        output->wm_idx[0] = wm_idx;

        output->drop_update_idx = 0;

        spin_unlock_irqrestore(&vfe->output_lock, flags);

        return 0;

error_get_wm:
        vfe_release_wm(vfe, output->wm_idx[0]);
        output->state = VFE_OUTPUT_OFF;
error:
        spin_unlock_irqrestore(&vfe->output_lock, flags);

        return -EINVAL;
}

/*
 * vfe_enable - Enable streaming on VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_enable(struct vfe_line *line)
{
        struct vfe_device *vfe = to_vfe(line);
        int ret;

        mutex_lock(&vfe->stream_lock);

        if (!vfe->stream_count)
                vfe_enable_irq_common(vfe);

        vfe->stream_count++;

        mutex_unlock(&vfe->stream_lock);

        ret = vfe_get_output(line);
        if (ret < 0)
                goto error_get_output;

        ret = vfe_enable_output_v2(line);
        if (ret < 0)
                goto error_enable_output;

        vfe->was_streaming = 1;

        return 0;

error_enable_output:
        vfe_put_output(line);

error_get_output:
        mutex_lock(&vfe->stream_lock);

        vfe->stream_count--;

        mutex_unlock(&vfe->stream_lock);

        return ret;
}

/*
 * vfe_isr_sof - Process start of frame interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 */
static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
{
        /* nop */
}

/*
 * vfe_isr_reg_update - Process reg update interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 */
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
        struct vfe_output *output;
        unsigned long flags;

        spin_lock_irqsave(&vfe->output_lock, flags);
        vfe->res->hw_ops->reg_update_clear(vfe, line_id);

        output = &vfe->line[line_id].output;

        if (output->wait_reg_update) {
                output->wait_reg_update = 0;
                complete(&output->reg_update);
        }

        spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_isr_wm_done - Process write master done interrupt
 * @vfe: VFE Device
 * @wm: Write master id
 */
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
        struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
        struct camss_buffer *ready_buf;
        struct vfe_output *output;
        unsigned long flags;
        u32 index;
        u64 ts = ktime_get_ns();

        spin_lock_irqsave(&vfe->output_lock, flags);

        if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
                dev_err_ratelimited(vfe->camss->dev,
                                    "Received wm done for unmapped index\n");
                goto out_unlock;
        }
        output = &vfe->line[vfe->wm_output_map[wm]].output;

        ready_buf = output->buf[0];
        if (!ready_buf) {
                dev_err_ratelimited(vfe->camss->dev,
                                    "Missing ready buf %d!\n", output->state);
                goto out_unlock;
        }

        ready_buf->vb.vb2_buf.timestamp = ts;
        ready_buf->vb.sequence = output->sequence++;

        index = 0;
        output->buf[0] = output->buf[1];
        if (output->buf[0])
                index = 1;

        output->buf[index] = vfe_buf_get_pending(output);

        if (output->buf[index])
                vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
        else
                output->gen2.active_num--;

        spin_unlock_irqrestore(&vfe->output_lock, flags);

        vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);

        return;

out_unlock:
        spin_unlock_irqrestore(&vfe->output_lock, flags);
}

static const struct vfe_isr_ops vfe_isr_ops_170 = {
        .reset_ack = vfe_isr_reset_ack,
        .halt_ack = vfe_isr_halt_ack,
        .reg_update = vfe_isr_reg_update,
        .sof = vfe_isr_sof,
        .comp_done = vfe_isr_comp_done,
        .wm_done = vfe_isr_wm_done,
};

static const struct camss_video_ops vfe_video_ops_170 = {
        .queue_buffer = vfe_queue_buffer_v2,
        .flush_buffers = vfe_flush_buffers,
};

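/*
 * vfe_subdev_init - Set the VFE v170 ISR and video operations
 * @dev: Device
 * @vfe: VFE device
 */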
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
        vfe->isr_ops = vfe_isr_ops_170;
        vfe->video_ops = vfe_video_ops_170;
}

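/* Hardware operations for VFE v170 */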
const struct vfe_hw_ops vfe_ops_170 = {
        .global_reset = vfe_global_reset,
        .hw_version = vfe_hw_version,
        .isr_read = vfe_isr_read,
        .isr = vfe_isr,
        .pm_domain_off = vfe_pm_domain_off,
        .pm_domain_on = vfe_pm_domain_on,
        .reg_update_clear = vfe_reg_update_clear,
        .reg_update = vfe_reg_update,
        .subdev_init = vfe_subdev_init,
        .vfe_disable = vfe_disable,
        .vfe_enable = vfe_enable,
        .vfe_halt = vfe_halt,
        .violation_read = vfe_violation_read,
        .vfe_wm_start = vfe_wm_start,
        .vfe_wm_stop = vfe_wm_stop,
        .vfe_wm_update = vfe_wm_update,
};