// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include <linux/string_choices.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#include <drm/drm_managed.h>

#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_cmd(x) \
	container_of(x, struct dpu_encoder_phys_cmd, base)

#define PP_TIMEOUT_MAX_TRIALS	10
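/*
 * Each pingpong-done wait that times out bumps pp_timeout_report_cnt;
 * once the count reaches PP_TIMEOUT_MAX_TRIALS the frame-done event is
 * flagged with DPU_ENCODER_FRAME_EVENT_PANEL_DEAD (see
 * _dpu_encoder_phys_cmd_handle_ppdone_timeout() below).
 */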

/*
 * Tearcheck sync start and continue thresholds are empirically found
 * based on common panels. In the future, we may want to allow panels to
 * override these default values.
 */
#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4

static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);

static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
	return (phys_enc->split_role != ENC_ROLE_SLAVE);
}

static void _dpu_encoder_phys_cmd_update_intf_cfg(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = {};

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.setup_intf_cfg)
		return;

	intf_cfg.intf = phys_enc->hw_intf->idx;
	if (phys_enc->split_role == ENC_ROLE_MASTER)
		intf_cfg.intf_master = phys_enc->hw_intf->idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
	intf_cfg.stream_sel = cmd_enc->stream_sel;
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);

	/* setup which pp blk will connect to this intf */
	if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5 &&
	    phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				phys_enc->hw_pp->idx);

	if (intf_cfg.dsc != 0)
		cmd_mode_cfg.data_compress = true;

	cmd_mode_cfg.wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);

	if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
		phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
}

static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	unsigned long lock_flags;
	int new_cnt;
	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("pp_done_irq");
	/* notify all synchronous clients first, then asynchronous clients */
	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  new_cnt, event);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("pp_done_irq");
}

static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_encoder_phys_cmd *cmd_enc;

	DPU_ATRACE_BEGIN("rd_ptr_irq");
	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);

	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
	wake_up_all(&cmd_enc->pending_vblank_wq);
	DPU_ATRACE_END("rd_ptr_irq");
}

static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	DPU_ATRACE_BEGIN("ctl_start_irq");

	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any waiting ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("ctl_start_irq");
}

static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}

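/*
 * Cache the interrupt indices used by this physical encoder. On hardware
 * with has_intf_te set (DPU >= 5.0, see dpu_encoder_phys_cmd_init()) the
 * tear/read-pointer interrupt is sourced from the INTF block; on older
 * hardware it comes from the PINGPONG block.
 */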
static void dpu_encoder_phys_cmd_atomic_mode_set(
		struct dpu_encoder_phys *phys_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;

	phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;

	if (phys_enc->has_intf_te)
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
	else
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;

	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}

static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
	bool do_log = false;
	struct drm_encoder *drm_enc;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	drm_enc = phys_enc->parent;

	cmd_enc->pp_timeout_report_cnt++;
	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
		do_log = true;
	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
		do_log = true;
	}

	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
					     phys_enc->hw_pp->idx - PINGPONG_0,
					     cmd_enc->pp_timeout_report_cnt,
					     atomic_read(&phys_enc->pending_kickoff_cnt),
					     frame_event);

	/* to avoid flooding, only log first time, and "dead" time */
	if (do_log) {
		DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
			  DRMID(drm_enc),
			  phys_enc->hw_pp->idx - PINGPONG_0,
			  phys_enc->hw_ctl->idx - CTL_0,
			  cmd_enc->pp_timeout_report_cnt,
			  atomic_read(&phys_enc->pending_kickoff_cnt));
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
						 phys_enc->irq[INTR_IDX_RDPTR]);
	}

	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);

	/* request a ctl reset before the next kickoff */
	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);

	return -ETIMEDOUT;
}

static int _dpu_encoder_phys_cmd_wait_for_idle(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
					      phys_enc->irq[INTR_IDX_PINGPONG],
					      dpu_encoder_phys_cmd_pp_tx_done_irq,
					      &wait_info);
	if (ret == -ETIMEDOUT)
		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
	else if (!ret)
		cmd_enc->pp_timeout_report_cnt = 0;

	return ret;
}

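/*
 * Reference-counted vblank IRQ control for the master encoder: the first
 * enable registers the read-pointer interrupt callback and the last
 * disable unregisters it; intermediate calls only adjust the refcount.
 * Slave encoders don't report vblank, so they return without touching
 * the refcount.
 */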
static int dpu_encoder_phys_cmd_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	mutex_lock(&phys_enc->vblank_ctl_lock);
	refcount = phys_enc->vblank_refcount;

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      str_true_false(enable), refcount);

	if (enable) {
		if (phys_enc->vblank_refcount == 0)
			ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_RDPTR],
					dpu_encoder_phys_cmd_te_rd_ptr_irq,
					phys_enc);
		if (!ret)
			phys_enc->vblank_refcount++;
	} else if (!enable) {
		if (phys_enc->vblank_refcount == 1)
			ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_RDPTR]);
		if (!ret)
			phys_enc->vblank_refcount--;
	}

end:
	mutex_unlock(&phys_enc->vblank_ctl_lock);
	if (ret) {
		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
			  str_true_false(enable), refcount);
	}

	return ret;
}

static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc)
{
	trace_dpu_enc_phys_cmd_irq_enable(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  phys_enc->vblank_refcount);

	dpu_core_irq_register_callback(phys_enc->dpu_kms,
				       phys_enc->irq[INTR_IDX_PINGPONG],
				       dpu_encoder_phys_cmd_pp_tx_done_irq,
				       phys_enc);
	dpu_core_irq_register_callback(phys_enc->dpu_kms,
				       phys_enc->irq[INTR_IDX_UNDERRUN],
				       dpu_encoder_phys_cmd_underrun_irq,
				       phys_enc);
	dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);

	if (dpu_encoder_phys_cmd_is_master(phys_enc) && phys_enc->irq[INTR_IDX_CTL_START])
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
					       phys_enc->irq[INTR_IDX_CTL_START],
					       dpu_encoder_phys_cmd_ctl_start_irq,
					       phys_enc);
}

static void dpu_encoder_phys_cmd_irq_disable(struct dpu_encoder_phys *phys_enc)
{
	trace_dpu_enc_phys_cmd_irq_disable(DRMID(phys_enc->parent),
					   phys_enc->hw_pp->idx - PINGPONG_0,
					   phys_enc->vblank_refcount);

	if (dpu_encoder_phys_cmd_is_master(phys_enc) && phys_enc->irq[INTR_IDX_CTL_START])
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
						 phys_enc->irq[INTR_IDX_CTL_START]);

	dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_UNDERRUN]);
	dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
	dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG]);
}

static void dpu_encoder_phys_cmd_tearcheck_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_tear_check tc_cfg = { 0 };
	struct drm_display_mode *mode;
	bool tc_enable = true;
	unsigned long vsync_hz;
	struct dpu_kms *dpu_kms;

	/*
	 * TODO: if/when resource allocation is refactored, move this to a
	 * place where the driver can actually return an error.
	 */
	if (!phys_enc->has_intf_te &&
	    (!phys_enc->hw_pp ||
	     !phys_enc->hw_pp->ops.enable_tearcheck)) {
		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "intf %d pp %d\n",
			 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
			 phys_enc->hw_pp ? phys_enc->hw_pp->idx - PINGPONG_0 : -1);

	mode = &phys_enc->cached_mode;

	dpu_kms = phys_enc->dpu_kms;

	/*
	 * TE default: DSI byte clock calculated based on 70 fps;
	 * around 14 ms to complete a kickoff cycle if TE is disabled;
	 * vclk_line based on 60 fps; write is faster than read;
	 * init == start == rdptr;
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
	 * LCD panel frequency, divided by the number of rows (lines) in
	 * the LCD panel.
	 */
	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
	if (!vsync_hz) {
		DPU_DEBUG_CMDENC(cmd_enc, "invalid - no vsync clock\n");
		return;
	}

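	/*
	 * Worked example with illustrative numbers (not taken from any
	 * specific platform): a 19.2 MHz "vsync" clock and a mode with
	 * vtotal = 2500 at 60 Hz gives
	 * vsync_count = 19200000 / (2500 * 60) = 128 ticks per line.
	 */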
	tc_cfg.vsync_count = vsync_hz /
		(mode->vtotal * drm_mode_vrefresh(mode));

	/*
	 * Set the sync_cfg_height to twice vtotal so that if we lose a
	 * TE event coming from the display TE pin we won't stall immediately
	 */
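	/*
	 * Note: vsync_init_val and start_pos are both programmed to
	 * vdisplay, and rd_ptr_irq fires one line after the active region;
	 * that interrupt is routed to dpu_encoder_phys_cmd_te_rd_ptr_irq()
	 * above and hence drives the vblank events reported upward.
	 */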
	tc_cfg.hw_vsync_mode = 1;
	tc_cfg.sync_cfg_height = mode->vtotal * 2;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
	tc_cfg.start_pos = mode->vdisplay;
	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

	DPU_DEBUG_CMDENC(cmd_enc,
			 "tc vsync_clk_speed_hz %lu vtotal %u vrefresh %u\n",
			 vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
	DPU_DEBUG_CMDENC(cmd_enc,
			 "tc enable %u start_pos %u rd_ptr_irq %u\n",
			 tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
	DPU_DEBUG_CMDENC(cmd_enc,
			 "tc hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
			 tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
			 tc_cfg.vsync_init_val);
	DPU_DEBUG_CMDENC(cmd_enc,
			 "tc cfgheight %u thresh_start %u thresh_cont %u\n",
			 tc_cfg.sync_cfg_height, tc_cfg.sync_threshold_start,
			 tc_cfg.sync_threshold_continue);

	if (phys_enc->has_intf_te)
		phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
	else
		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);
}

static void _dpu_encoder_phys_cmd_pingpong_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
			 phys_enc->hw_pp->idx - PINGPONG_0);
	drm_mode_debug_printmodeline(&phys_enc->cached_mode);

	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}

static bool dpu_encoder_phys_cmd_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	/**
	 * we do separate flush for each CTL and let
	 * CTL_START synchronize them
	 */
	return false;
}

static void dpu_encoder_phys_cmd_enable_helper(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);

	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);

	ctl = phys_enc->hw_ctl;
	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid phys encoder\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
		DPU_ERROR("already enabled\n");
		return;
	}

	dpu_encoder_phys_cmd_enable_helper(phys_enc);
	phys_enc->enable_state = DPU_ENC_ENABLED;
}

static void _dpu_encoder_phys_cmd_connect_te(
		struct dpu_encoder_phys *phys_enc, bool enable)
{
	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf, enable);
	} else {
		if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
	}
}

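/*
 * Disconnect the external TE pin before idle power collapse; it is
 * reconnected in dpu_encoder_phys_cmd_handle_post_kickoff() once the
 * next frame has been kicked off.
 */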
static void dpu_encoder_phys_cmd_prepare_idle_pc(
		struct dpu_encoder_phys *phys_enc)
{
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}

static int dpu_encoder_phys_cmd_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pingpong *hw_pp;
	struct dpu_hw_intf *hw_intf;

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return -EINVAL;

	if (phys_enc->has_intf_te) {
		hw_intf = phys_enc->hw_intf;
		if (!hw_intf || !hw_intf->ops.get_line_count)
			return -EINVAL;
		return hw_intf->ops.get_line_count(hw_intf);
	}

	hw_pp = phys_enc->hw_pp;
	if (!hw_pp || !hw_pp->ops.get_line_count)
		return -EINVAL;
	return hw_pp->ops.get_line_count(hw_pp);
}

static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
		return;
	}

	if (phys_enc->has_intf_te) {
		DRM_DEBUG_KMS("id:%u intf:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_intf->idx - INTF_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_intf->ops.disable_tearcheck)
			phys_enc->hw_intf->ops.disable_tearcheck(phys_enc->hw_intf);
	} else {
		if (!phys_enc->hw_pp) {
			DPU_ERROR("invalid encoder\n");
			return;
		}

		DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_pp->idx - PINGPONG_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_pp->ops.disable_tearcheck)
			phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
	}

	if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				PINGPONG_NONE);

		ctl = phys_enc->hw_ctl;
		ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
	}

	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_cmd_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int ret;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(&phys_enc->pending_kickoff_cnt));

	/*
	 * Mark the kickoff request as outstanding. If there is more than
	 * one outstanding request, we have to wait for the previous one
	 * to complete.
	 */
	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (ret) {
		/* force pending_kickoff_cnt 0 to discard failed kickoff */
		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
			  DRMID(phys_enc->parent), ret,
			  phys_enc->hw_pp->idx - PINGPONG_0);
	}

	dpu_encoder_phys_cmd_enable_te(phys_enc);

	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
			 phys_enc->hw_pp->idx - PINGPONG_0,
			 atomic_read(&phys_enc->pending_kickoff_cnt));
}

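/*
 * Despite the name, what this does on the master encoder is make sure
 * hardware autorefresh is disabled (via the INTF block when has_intf_te
 * is set, via the PINGPONG block otherwise) before a frame is kicked
 * off, so the kickoff does not race an autorefresh-triggered frame.
 */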
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
{
	if (!phys_enc)
		return;
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf->ops.disable_autorefresh)
			return;

		phys_enc->hw_intf->ops.disable_autorefresh(
				phys_enc->hw_intf,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	} else {
		if (!phys_enc->hw_pp ||
		    !phys_enc->hw_pp->ops.disable_autorefresh)
			return;

		phys_enc->hw_pp->ops.disable_autorefresh(
				phys_enc->hw_pp,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	}
}

static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
					      phys_enc->irq[INTR_IDX_CTL_START],
					      dpu_encoder_phys_cmd_ctl_start_irq,
					      &wait_info);
	if (ret == -ETIMEDOUT) {
		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
		ret = -EINVAL;
	} else if (!ret)
		ret = 0;

	return ret;
}

static int dpu_encoder_phys_cmd_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	int rc;

	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (rc) {
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
			  DRMID(phys_enc->parent), rc,
			  phys_enc->hw_intf->idx - INTF_0);
	}

	return rc;
}

static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return 0;

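	/*
	 * If the CTL has already been started, a frame is in flight: wait
	 * for its transfer to the panel to complete. Otherwise this is
	 * the first kickoff after enable, and waiting for CTL_START is
	 * sufficient.
	 */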
	if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
		return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);

	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}

static void dpu_encoder_phys_cmd_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	/**
	 * re-enable external TE, either for the first time after enabling
	 * or if disabled for Autorefresh
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}

static void dpu_encoder_phys_cmd_trigger_start(
		struct dpu_encoder_phys *phys_enc)
{
	dpu_encoder_helper_trigger_start(phys_enc);
}

static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
	ops->enable = dpu_encoder_phys_cmd_enable;
	ops->disable = dpu_encoder_phys_cmd_disable;
	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
	ops->irq_enable = dpu_encoder_phys_cmd_irq_enable;
	ops->irq_disable = dpu_encoder_phys_cmd_irq_disable;
	ops->restore = dpu_encoder_phys_cmd_enable_helper;
	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}

/**
 * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
 * @dev: Corresponding device for devres management
 * @p: Pointer to init params structure
 * Return: Error code or newly allocated encoder
 */
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;
	struct dpu_encoder_phys_cmd *cmd_enc = NULL;

	DPU_DEBUG("intf\n");

	cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
	if (!cmd_enc) {
		DPU_ERROR("failed to allocate\n");
		return ERR_PTR(-ENOMEM);
	}
	phys_enc = &cmd_enc->base;

	dpu_encoder_phys_init(phys_enc, p);

	mutex_init(&phys_enc->vblank_ctl_lock);
	phys_enc->vblank_refcount = 0;

	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
	phys_enc->intf_mode = INTF_MODE_CMD;
	cmd_enc->stream_sel = 0;

	if (!phys_enc->hw_intf) {
		DPU_ERROR_CMDENC(cmd_enc, "no INTF provided\n");
		return ERR_PTR(-EINVAL);
	}

	/* DPU prior to 5.0 uses the PINGPONG block for TE handling */
	if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
		phys_enc->has_intf_te = true;

	if (phys_enc->has_intf_te && !phys_enc->hw_intf->ops.enable_tearcheck) {
		DPU_ERROR_CMDENC(cmd_enc, "tearcheck not supported\n");
		return ERR_PTR(-EINVAL);
	}

	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
	init_waitqueue_head(&cmd_enc->pending_vblank_wq);

	DPU_DEBUG_CMDENC(cmd_enc, "created\n");

	return phys_enc;
}