// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_merge3d.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#include <drm/drm_managed.h>

#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->parent ? \
		(e)->parent->base.id : -1, \
		(e) && (e)->hw_intf ? \
		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->parent ? \
		(e)->parent->base.id : -1, \
		(e) && (e)->hw_intf ? \
		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_vid(x) \
	container_of(x, struct dpu_encoder_phys_vid, base)
static bool dpu_encoder_phys_vid_is_master(
		struct dpu_encoder_phys *phys_enc)
{
	bool ret = false;

	if (phys_enc->split_role != ENC_ROLE_SLAVE)
		ret = true;

	return ret;
}

static void drm_mode_to_intf_timing_params(
		const struct dpu_encoder_phys *phys_enc,
		const struct drm_display_mode *mode,
		struct dpu_hw_intf_timing_params *timing)
{
	memset(timing, 0, sizeof(*timing));

	if ((mode->htotal < mode->hsync_end)
			|| (mode->hsync_start < mode->hdisplay)
			|| (mode->vtotal < mode->vsync_end)
			|| (mode->vsync_start < mode->vdisplay)
			|| (mode->hsync_end < mode->hsync_start)
			|| (mode->vsync_end < mode->vsync_start)) {
		DPU_ERROR(
		    "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
				mode->hsync_start, mode->hsync_end,
				mode->htotal, mode->hdisplay);
		DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
				mode->vsync_start, mode->vsync_end,
				mode->vtotal, mode->vdisplay);
		return;
	}

	/*
	 * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
	 *  Active Region      Front Porch   Sync   Back Porch
	 * <-----------------><------------><-----><----------->
	 * <- [hv]display --->
	 * <--------- [hv]sync_start ------>
	 * <----------------- [hv]sync_end ------->
	 * <---------------------------- [hv]total ------------->
	 */
	timing->width = mode->hdisplay;	/* active width */
	timing->height = mode->vdisplay;	/* active height */
	timing->xres = timing->width;
	timing->yres = timing->height;
	timing->h_back_porch = mode->htotal - mode->hsync_end;
	timing->h_front_porch = mode->hsync_start - mode->hdisplay;
	timing->v_back_porch = mode->vtotal - mode->vsync_end;
	timing->v_front_porch = mode->vsync_start - mode->vdisplay;
	timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
	timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
	timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
	timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
	timing->border_clr = 0;
	timing->underflow_clr = 0xff;
	timing->hsync_skew = mode->hskew;

	/* DSI controller cannot handle active-low sync signals. */
	if (phys_enc->hw_intf->cap->type == INTF_DSI) {
		timing->hsync_polarity = 0;
		timing->vsync_polarity = 0;
	}

	/* for DP/EDP, shift timings to align them to the bottom right */
	if (phys_enc->hw_intf->cap->type == INTF_DP) {
		timing->h_back_porch += timing->h_front_porch;
		timing->h_front_porch = 0;
		timing->v_back_porch += timing->v_front_porch;
		timing->v_front_porch = 0;
	}

	timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);

	/*
	 * for DP, divide the horizontal parameters by 2 when
	 * widebus is enabled
	 */
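	/*
	 * In widebus mode the interface fetches two pixels per pixel clock,
	 * so the horizontal counts below are programmed in 2-pixel units.
	 */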
	if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
		timing->width = timing->width >> 1;
		timing->xres = timing->xres >> 1;
		timing->h_back_porch = timing->h_back_porch >> 1;
		timing->h_front_porch = timing->h_front_porch >> 1;
		timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
	}
}

static u32 get_horizontal_total(const struct dpu_hw_intf_timing_params *timing)
{
	u32 active = timing->xres;
	u32 inactive =
	    timing->h_back_porch + timing->h_front_porch +
	    timing->hsync_pulse_width;
	return active + inactive;
}

static u32 get_vertical_total(const struct dpu_hw_intf_timing_params *timing)
{
	u32 active = timing->yres;
	u32 inactive =
	    timing->v_back_porch + timing->v_front_porch +
	    timing->vsync_pulse_width;
	return active + inactive;
}

/*
 * programmable_fetch_get_num_lines:
 *	Number of fetch lines in vertical front porch
 * @timing: Pointer to the intf timing information for the requested mode
 *
 * Returns the number of fetch lines in vertical front porch at which mdp
 * can start fetching the next frame.
 *
 * Number of needed prefetch lines is anything that cannot be absorbed in the
 * start of frame time (back porch + vsync pulse width).
 *
 * Some panels have very large VFP, however we only need a total number of
 * lines based on the chip worst case latencies.
 */
static u32 programmable_fetch_get_num_lines(
		struct dpu_encoder_phys *phys_enc,
		const struct dpu_hw_intf_timing_params *timing)
{
	u32 worst_case_needed_lines =
	    phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
	u32 start_of_frame_lines =
	    timing->v_back_porch + timing->vsync_pulse_width;
	u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
	u32 actual_vfp_lines = 0;
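	/*
	 * Note: the u32 subtraction above can wrap when vbp + vsw already
	 * exceeds the worst case, but the first branch below catches that
	 * case before needed_vfp_lines is used.
	 */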

	/* Fetch must be outside active lines, otherwise undefined. */
	if (start_of_frame_lines >= worst_case_needed_lines) {
		DPU_DEBUG_VIDENC(phys_enc,
				"prog fetch is not needed, large vbp+vsw\n");
		actual_vfp_lines = 0;
	} else if (timing->v_front_porch < needed_vfp_lines) {
		/* Warn fetch needed, but not enough porch in panel config */
		pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
		DPU_DEBUG_VIDENC(phys_enc,
				"less vfp than fetch req, using entire vfp\n");
		actual_vfp_lines = timing->v_front_porch;
	} else {
		DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
		actual_vfp_lines = needed_vfp_lines;
	}

	DPU_DEBUG_VIDENC(phys_enc,
			"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
			timing->v_front_porch, timing->v_back_porch,
			timing->vsync_pulse_width);
	DPU_DEBUG_VIDENC(phys_enc,
			"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
			worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);

	return actual_vfp_lines;
}

/*
 * programmable_fetch_config: Programs HW to prefetch lines by offsetting
 *	the start of fetch into the vertical front porch for cases where the
 *	vsync pulse width and vertical back porch time is insufficient
 *
 *	Gets # of lines to pre-fetch, then calculate VSYNC counter value.
 *	HW layer requires VSYNC counter of first pixel of tgt VFP line.
 *
 * @timing: Pointer to the intf timing information for the requested mode
 */
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
				      const struct dpu_hw_intf_timing_params *timing)
{
	struct dpu_hw_intf_prog_fetch f = { 0 };
	u32 vfp_fetch_lines = 0;
	u32 horiz_total = 0;
	u32 vert_total = 0;
	u32 vfp_fetch_start_vsync_counter = 0;
	unsigned long lock_flags;

	if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
		return;

	vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
	if (vfp_fetch_lines) {
		vert_total = get_vertical_total(timing);
		horiz_total = get_horizontal_total(timing);
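		/*
		 * The fetch start is expressed as a vsync counter value,
		 * counting pixels from the start of frame: the first pixel
		 * of the line vfp_fetch_lines before the end of the frame.
		 */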
		vfp_fetch_start_vsync_counter =
			(vert_total - vfp_fetch_lines) * horiz_total + 1;
		f.enable = 1;
		f.fetch_start = vfp_fetch_start_vsync_counter;
	}

	DPU_DEBUG_VIDENC(phys_enc,
		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
		vfp_fetch_lines, vfp_fetch_start_vsync_counter);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}

static void dpu_encoder_phys_vid_setup_timing_engine(
		struct dpu_encoder_phys *phys_enc)
{
	struct drm_display_mode mode;
	struct dpu_hw_intf_timing_params timing_params = { 0 };
	const struct dpu_format *fmt = NULL;
	u32 fmt_fourcc = DRM_FORMAT_RGB888;
	unsigned long lock_flags;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };

	drm_mode_init(&mode, &phys_enc->cached_mode);

	if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
		return;
	}

	if (!phys_enc->hw_intf->ops.setup_timing_gen) {
		DPU_ERROR("timing engine setup is not supported\n");
		return;
	}

	DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
	drm_mode_debug_printmodeline(&mode);

	if (phys_enc->split_role != ENC_ROLE_SOLO) {
		mode.hdisplay >>= 1;
		mode.htotal >>= 1;
		mode.hsync_start >>= 1;
		mode.hsync_end >>= 1;

		DPU_DEBUG_VIDENC(phys_enc,
			"split_role %d, halve horizontal %d %d %d %d\n",
			phys_enc->split_role,
			mode.hdisplay, mode.htotal,
			mode.hsync_start, mode.hsync_end);
	}

	drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);

	fmt = dpu_get_dpu_format(fmt_fourcc);
	DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);

	intf_cfg.intf = phys_enc->hw_intf->idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
	if (phys_enc->hw_pp->merge_3d)
		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
			&timing_params, fmt);
	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);

	/* setup which pp blk will connect to this intf */
	if (phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				phys_enc->hw_pp->idx);

	if (phys_enc->hw_pp->merge_3d)
		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
				intf_cfg.mode_3d);

	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	programmable_fetch_config(phys_enc, &timing_params);
}

static void dpu_encoder_phys_vid_vblank_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_hw_ctl *hw_ctl;
	unsigned long lock_flags;
	u32 flush_register = 0;

	hw_ctl = phys_enc->hw_ctl;

	DPU_ATRACE_BEGIN("vblank_irq");

	dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);

	atomic_read(&phys_enc->pending_kickoff_cnt);

	/*
	 * only decrement the pending flush count if we've actually flushed
	 * hardware. due to sw irq latency, vblank may have already happened
	 * so we need to double-check with hw that it accepted the flush bits
	 */
	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	if (hw_ctl->ops.get_flush_register)
		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);

	if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
		atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
			DPU_ENCODER_FRAME_EVENT_DONE);

	DPU_ATRACE_END("vblank_irq");
}

static void dpu_encoder_phys_vid_underrun_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}

static bool dpu_encoder_phys_vid_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	return phys_enc->split_role != ENC_ROLE_SOLO;
}

static void dpu_encoder_phys_vid_atomic_mode_set(
		struct dpu_encoder_phys *phys_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;

	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}

static int dpu_encoder_phys_vid_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	mutex_lock(&phys_enc->vblank_ctl_lock);
	refcount = phys_enc->vblank_refcount;

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_vid_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
		      refcount);

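	/* Register the vsync callback on the first reference, drop it on the last */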
	if (enable) {
		if (phys_enc->vblank_refcount == 0)
			ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_VSYNC],
					dpu_encoder_phys_vid_vblank_irq,
					phys_enc);
		if (!ret)
			phys_enc->vblank_refcount++;
	} else if (!enable) {
		if (phys_enc->vblank_refcount == 1)
			ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_VSYNC]);
		if (!ret)
			phys_enc->vblank_refcount--;
	}

end:
	mutex_unlock(&phys_enc->vblank_ctl_lock);
	if (ret) {
		DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_intf->idx - INTF_0, ret, enable,
			  refcount);
	}
	return ret;
}

static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;

	DPU_DEBUG_VIDENC(phys_enc, "\n");

	if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
		return;

	dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);

	dpu_encoder_phys_vid_setup_timing_engine(phys_enc);

	/*
	 * For single flush cases (dual-ctl or pp-split), skip setting the
	 * flush bit for the slave intf, since both intfs use same ctl
	 * and HW will only flush the master.
	 */
	if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
	    !dpu_encoder_phys_vid_is_master(phys_enc))
		goto skip_flush;

	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
	if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
		ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);

skip_flush:
	DPU_DEBUG_VIDENC(phys_enc,
			 "update pending flush ctl %d intf %d\n",
			 ctl->idx - CTL_0, phys_enc->hw_intf->idx);

	atomic_set(&phys_enc->underrun_cnt, 0);

	/* ctl_flush & timing engine enable will be triggered by framework */
	if (phys_enc->enable_state == DPU_ENC_DISABLED)
		phys_enc->enable_state = DPU_ENC_ENABLING;
}

static int dpu_encoder_phys_vid_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	if (!dpu_encoder_phys_vid_is_master(phys_enc))
		return 0;

	/* Wait for kickoff to complete */
	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_VSYNC],
			dpu_encoder_phys_vid_vblank_irq,
			&wait_info);

	if (ret == -ETIMEDOUT)
		dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);

	return ret;
}

static int dpu_encoder_phys_vid_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
	int ret;

	if (!hw_ctl)
		return 0;

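	/*
	 * The pending flush bits remain set in the flush register until the
	 * hardware latches them at vsync, so a zero register means the
	 * commit has been taken by the hardware.
	 */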
	ret = wait_event_timeout(phys_enc->pending_kickoff_wq,
			(hw_ctl->ops.get_flush_register(hw_ctl) == 0),
			msecs_to_jiffies(50));
	if (ret <= 0) {
		DPU_ERROR("vblank timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void dpu_encoder_phys_vid_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	drm_enc = phys_enc->parent;

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.wait_reset_status)
		return;

	/*
	 * hw supports hardware initiated ctl reset, so before we kickoff a new
	 * frame, need to check and wait for hw initiated ctl reset completion
	 */
	rc = ctl->ops.wait_reset_status(ctl);
	if (rc) {
		DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
				ctl->idx, rc);
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_VSYNC]);
	}
}

static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
	unsigned long lock_flags;
	int ret;
	struct dpu_hw_intf_status intf_status = {0};

	if (!phys_enc->parent || !phys_enc->parent->dev) {
		DPU_ERROR("invalid encoder/device\n");
		return;
	}

	if (!phys_enc->hw_intf) {
		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
				phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
		return;
	}

	if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
		return;

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR("already disabled\n");
		return;
	}

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
	if (dpu_encoder_phys_vid_is_master(phys_enc))
		dpu_encoder_phys_inc_pending(phys_enc);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	/*
	 * Wait for a vsync so we know the ENABLE=0 latched before
	 * the (connector) source of the vsync's gets disabled,
	 * otherwise we end up in a funny state if we re-enable
	 * before the disable latches, with the result that some of
	 * the settings changes for the new modeset (like the new
	 * scanout buffer) don't latch properly.
	 */
	if (dpu_encoder_phys_vid_is_master(phys_enc)) {
		ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
		if (ret) {
			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
				  DRMID(phys_enc->parent),
				  phys_enc->hw_intf->idx - INTF_0, ret);
		}
	}

	if (phys_enc->hw_intf && phys_enc->hw_intf->ops.get_status)
		phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &intf_status);

	/*
	 * Wait for a vsync if timing en status is on after timing engine
	 * is disabled.
	 */
	if (intf_status.is_en && dpu_encoder_phys_vid_is_master(phys_enc)) {
		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
		dpu_encoder_phys_inc_pending(phys_enc);
		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
		ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
		if (ret) {
			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
				  DRMID(phys_enc->parent),
				  phys_enc->hw_intf->idx - INTF_0, ret);
		}
	}

	dpu_encoder_helper_phys_cleanup(phys_enc);
	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_vid_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	unsigned long lock_flags;

	/*
	 * Video mode must flush CTL before enabling timing engine
	 * Video encoders need to turn on their interfaces now
	 */
	if (phys_enc->enable_state == DPU_ENC_ENABLING) {
		trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
				    phys_enc->hw_intf->idx - INTF_0);
		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
		phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
		phys_enc->enable_state = DPU_ENC_ENABLED;
	}
}

static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret;

	trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
			    phys_enc->hw_intf->idx - INTF_0,
			    enable,
			    phys_enc->vblank_refcount);

	if (enable) {
		ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
		if (WARN_ON(ret))
			return;

		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN],
				dpu_encoder_phys_vid_underrun_irq,
				phys_enc);
	} else {
		dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN]);
	}
}

static int dpu_encoder_phys_vid_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	if (!dpu_encoder_phys_vid_is_master(phys_enc))
		return -EINVAL;

	if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
		return -EINVAL;

	return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
}

static int dpu_encoder_phys_vid_get_frame_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_intf_status s = {0};
	u32 fetch_start = 0;
	struct drm_display_mode mode;

	drm_mode_init(&mode, &phys_enc->cached_mode);

	if (!dpu_encoder_phys_vid_is_master(phys_enc))
		return -EINVAL;

	if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_status)
		return -EINVAL;

	phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &s);

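	/*
	 * If programmable fetch is enabled and the line counter is already
	 * past the fetch start point, the hardware is fetching the next
	 * frame, so report that frame's count instead.
	 */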
	if (s.is_prog_fetch_en && s.is_en) {
		fetch_start = mode.vtotal - (mode.vsync_start - mode.vdisplay);
		if ((s.line_count > fetch_start) &&
		    (s.line_count <= mode.vtotal))
			return s.frame_count + 1;
	}

	return s.frame_count;
}

static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_vid_is_master;
	ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
	ops->enable = dpu_encoder_phys_vid_enable;
	ops->disable = dpu_encoder_phys_vid_disable;
	ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
	ops->irq_control = dpu_encoder_phys_vid_irq_control;
	ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
	ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
	ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
	ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
	ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}

struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;

	if (!p) {
		DPU_ERROR("failed to create encoder due to invalid parameter\n");
		return ERR_PTR(-EINVAL);
	}

	phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
	if (!phys_enc) {
		DPU_ERROR("failed to create encoder due to memory allocation error\n");
		return ERR_PTR(-ENOMEM);
	}

	DPU_DEBUG_VIDENC(phys_enc, "\n");

	dpu_encoder_phys_init(phys_enc, p);
	mutex_init(&phys_enc->vblank_ctl_lock);
	phys_enc->vblank_refcount = 0;

	dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
	phys_enc->intf_mode = INTF_MODE_VIDEO;

	DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx);

	return phys_enc;
}