xref: /linux/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c (revision e78f70bad29c5ae1e1076698b690b15794e9b81e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include <linux/string_choices.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#include <drm/drm_managed.h>

#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_cmd(x) \
	container_of(x, struct dpu_encoder_phys_cmd, base)

#define PP_TIMEOUT_MAX_TRIALS	10

/*
 * Tearcheck sync start and continue thresholds are empirically determined
 * based on common panels. In the future, we may want to allow panels to
 * override these default values.
 */
#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4

static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);

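/*
 * In a split-display topology one physical encoder is the master and the
 * rest are slaves; anything that is not explicitly a slave (including a
 * solo encoder) is treated as a master here.
 */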
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
	return (phys_enc->split_role != ENC_ROLE_SLAVE);
}

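/*
 * Program the CTL and INTF for command mode: select the command-mode data
 * path, bind the pingpong block to this interface on active-CTL hardware,
 * and, where the interface supports it, configure DSC data compression
 * and wide-bus mode.
 */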
static void _dpu_encoder_phys_cmd_update_intf_cfg(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = {};

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.setup_intf_cfg)
		return;

	intf_cfg.intf = phys_enc->hw_intf->idx;
	if (phys_enc->split_role == ENC_ROLE_MASTER)
		intf_cfg.intf_master = phys_enc->hw_intf->idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
	intf_cfg.stream_sel = cmd_enc->stream_sel;
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);

	/* setup which pp blk will connect to this intf */
	if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				phys_enc->hw_pp->idx);

	if (intf_cfg.dsc != 0)
		cmd_mode_cfg.data_compress = true;

	cmd_mode_cfg.wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);

	if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
		phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
}

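/*
 * PP_DONE interrupt handler: the pingpong block has finished pushing a
 * frame to the interface, so report frame-done to the parent encoder,
 * retire one pending kickoff and wake any thread waiting on it.
 */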
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	unsigned long lock_flags;
	int new_cnt;
	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("pp_done_irq");
	/* notify all synchronous clients first, then asynchronous clients */
	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  new_cnt, event);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("pp_done_irq");
}

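/*
 * Read-pointer (TE) interrupt handler: in command mode this interrupt
 * stands in for vblank, so forward it to the parent encoder's vblank
 * callback and retire one pending-vblank count.
 */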
static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_encoder_phys_cmd *cmd_enc;

	DPU_ATRACE_BEGIN("rd_ptr_irq");
	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);

	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
	wake_up_all(&cmd_enc->pending_vblank_wq);
	DPU_ATRACE_END("rd_ptr_irq");
}

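/*
 * CTL_START interrupt handler: the CTL block has latched the programmed
 * configuration, so retire one pending ctl-start count and wake any
 * waiter in the kickoff waitqueue.
 */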
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	DPU_ATRACE_BEGIN("ctl_start_irq");

	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any waiting ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("ctl_start_irq");
}

static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}

static void dpu_encoder_phys_cmd_atomic_mode_set(
		struct dpu_encoder_phys *phys_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;

	phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;

	if (phys_enc->has_intf_te)
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
	else
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;

	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}

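/*
 * Handle a PP_DONE timeout: to avoid flooding, only the first timeout and
 * the PP_TIMEOUT_MAX_TRIALS-th one (at which point the panel is reported
 * dead) are logged, along with a display snapshot for debugging, and a
 * CTL reset is requested before the next kickoff.
 */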
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
	bool do_log = false;
	struct drm_encoder *drm_enc;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	drm_enc = phys_enc->parent;

	cmd_enc->pp_timeout_report_cnt++;
	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
		do_log = true;
	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
		do_log = true;
	}

	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
		     phys_enc->hw_pp->idx - PINGPONG_0,
		     cmd_enc->pp_timeout_report_cnt,
		     atomic_read(&phys_enc->pending_kickoff_cnt),
		     frame_event);

	/* to avoid flooding, only log first time, and "dead" time */
	if (do_log) {
		DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
			  DRMID(drm_enc),
			  phys_enc->hw_pp->idx - PINGPONG_0,
			  phys_enc->hw_ctl->idx - CTL_0,
			  cmd_enc->pp_timeout_report_cnt,
			  atomic_read(&phys_enc->pending_kickoff_cnt));
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);
	}

	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);

	/* request a ctl reset before the next kickoff */
	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);

	return -ETIMEDOUT;
}

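/*
 * Wait until all previously queued kickoffs have been retired by PP_DONE,
 * escalating to the timeout handler above if the interrupt never arrives.
 */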
static int _dpu_encoder_phys_cmd_wait_for_idle(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_PINGPONG],
			dpu_encoder_phys_cmd_pp_tx_done_irq,
			&wait_info);
	if (ret == -ETIMEDOUT)
		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
	else if (!ret)
		cmd_enc->pp_timeout_report_cnt = 0;

	return ret;
}

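/*
 * Vblank (read-pointer) interrupt enablement is refcounted: the callback
 * is registered on the first enable and unregistered on the last disable.
 * For example, enable/enable/disable leaves the callback registered with
 * vblank_refcount == 1.
 */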
static int dpu_encoder_phys_cmd_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	mutex_lock(&phys_enc->vblank_ctl_lock);
	refcount = phys_enc->vblank_refcount;

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      str_true_false(enable), refcount);

	if (enable) {
		if (phys_enc->vblank_refcount == 0)
			ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_RDPTR],
					dpu_encoder_phys_cmd_te_rd_ptr_irq,
					phys_enc);
		if (!ret)
			phys_enc->vblank_refcount++;
	} else if (!enable) {
		if (phys_enc->vblank_refcount == 1)
			ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_RDPTR]);
		if (!ret)
			phys_enc->vblank_refcount--;
	}

end:
	mutex_unlock(&phys_enc->vblank_ctl_lock);
	if (ret) {
		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
			  str_true_false(enable), refcount);
	}

	return ret;
}

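/*
 * Register the per-encoder interrupt callbacks: PP_DONE and underrun
 * always, vblank via the refcounted helper above, and CTL_START only on
 * the master and only when the CTL provides that interrupt.
 * dpu_encoder_phys_cmd_irq_disable() below undoes this in reverse order.
 */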
static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc)
{
	trace_dpu_enc_phys_cmd_irq_enable(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  phys_enc->vblank_refcount);

	dpu_core_irq_register_callback(phys_enc->dpu_kms,
				       phys_enc->irq[INTR_IDX_PINGPONG],
				       dpu_encoder_phys_cmd_pp_tx_done_irq,
				       phys_enc);
	dpu_core_irq_register_callback(phys_enc->dpu_kms,
				       phys_enc->irq[INTR_IDX_UNDERRUN],
				       dpu_encoder_phys_cmd_underrun_irq,
				       phys_enc);
	dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);

	if (dpu_encoder_phys_cmd_is_master(phys_enc) && phys_enc->irq[INTR_IDX_CTL_START])
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
					       phys_enc->irq[INTR_IDX_CTL_START],
					       dpu_encoder_phys_cmd_ctl_start_irq,
					       phys_enc);
}

static void dpu_encoder_phys_cmd_irq_disable(struct dpu_encoder_phys *phys_enc)
{
	trace_dpu_enc_phys_cmd_irq_disable(DRMID(phys_enc->parent),
					   phys_enc->hw_pp->idx - PINGPONG_0,
					   phys_enc->vblank_refcount);

	if (dpu_encoder_phys_cmd_is_master(phys_enc) && phys_enc->irq[INTR_IDX_CTL_START])
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
						 phys_enc->irq[INTR_IDX_CTL_START]);

	dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_UNDERRUN]);
	dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
	dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG]);
}

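/*
 * Program the tearcheck block so that panel TE paces command-mode frame
 * transfers. As a rough sketch of the vsync_count computation below (the
 * numbers are hypothetical, not from any particular panel): with a
 * 19.2 MHz "vsync" clock and a mode where vtotal = 2000 at 60 fps,
 *
 *	vsync_count = 19200000 / (2000 * 60) = 160
 *
 * i.e. 160 vsync-clock ticks per panel line.
 */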
static void dpu_encoder_phys_cmd_tearcheck_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_tear_check tc_cfg = { 0 };
	struct drm_display_mode *mode;
	bool tc_enable = true;
	unsigned long vsync_hz;
	struct dpu_kms *dpu_kms;

	/*
	 * TODO: if/when resource allocation is refactored, move this to a
	 * place where the driver can actually return an error.
	 */
	if (!phys_enc->has_intf_te &&
	    (!phys_enc->hw_pp ||
	     !phys_enc->hw_pp->ops.enable_tearcheck)) {
		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "intf %d pp %d\n",
			 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
			 phys_enc->hw_pp ? phys_enc->hw_pp->idx - PINGPONG_0 : -1);

	mode = &phys_enc->cached_mode;

	dpu_kms = phys_enc->dpu_kms;

	/*
	 * TE defaults: the DSI byte clock is calculated based on 70 fps,
	 * so a kickoff cycle takes around 14 ms to complete when TE is
	 * disabled; vclk_line is based on 60 fps; writes are faster than
	 * reads; init == start == rdptr.
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
	 * LCD panel frequency, divided by the number of rows (lines) in the
	 * LCD panel.
	 */
	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
	if (!vsync_hz) {
		DPU_DEBUG_CMDENC(cmd_enc, "invalid - no vsync clock\n");
		return;
	}

	tc_cfg.vsync_count = vsync_hz /
				(mode->vtotal * drm_mode_vrefresh(mode));

	/*
	 * Set the sync_cfg_height to twice vtotal so that if we lose a
	 * TE event coming from the display TE pin we won't stall immediately
	 */
	tc_cfg.hw_vsync_mode = 1;
	tc_cfg.sync_cfg_height = mode->vtotal * 2;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
	tc_cfg.start_pos = mode->vdisplay;
	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

	DPU_DEBUG_CMDENC(cmd_enc,
		"tc vsync_clk_speed_hz %lu vtotal %u vrefresh %u\n",
		vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc enable %u start_pos %u rd_ptr_irq %u\n",
		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
		tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
		tc_cfg.vsync_init_val);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc cfgheight %u thresh_start %u thresh_cont %u\n",
		tc_cfg.sync_cfg_height, tc_cfg.sync_threshold_start,
		tc_cfg.sync_threshold_continue);

	if (phys_enc->has_intf_te)
		phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
	else
		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);
}

static void _dpu_encoder_phys_cmd_pingpong_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
			phys_enc->hw_pp->idx - PINGPONG_0);
	drm_mode_debug_printmodeline(&phys_enc->cached_mode);

	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}

static bool dpu_encoder_phys_cmd_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * we do separate flush for each CTL and let
	 * CTL_START synchronize them
	 */
	return false;
}

static void dpu_encoder_phys_cmd_enable_helper(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);

	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);

	ctl = phys_enc->hw_ctl;
	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid phys encoder\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
		DPU_ERROR("already enabled\n");
		return;
	}

	dpu_encoder_phys_cmd_enable_helper(phys_enc);
	phys_enc->enable_state = DPU_ENC_ENABLED;
}

static void _dpu_encoder_phys_cmd_connect_te(
		struct dpu_encoder_phys *phys_enc, bool enable)
{
	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf, enable);
	} else {
		if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
	}
}

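/*
 * Before idle power collapse, disconnect the external TE input (it is
 * reconnected in dpu_encoder_phys_cmd_handle_post_kickoff() below),
 * presumably so that panel TE pulses cannot tick the tearcheck logic
 * while the hardware is collapsed.
 */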
static void dpu_encoder_phys_cmd_prepare_idle_pc(
		struct dpu_encoder_phys *phys_enc)
{
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}

static int dpu_encoder_phys_cmd_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pingpong *hw_pp;
	struct dpu_hw_intf *hw_intf;

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return -EINVAL;

	if (phys_enc->has_intf_te) {
		hw_intf = phys_enc->hw_intf;
		if (!hw_intf || !hw_intf->ops.get_line_count)
			return -EINVAL;
		return hw_intf->ops.get_line_count(hw_intf);
	}

	hw_pp = phys_enc->hw_pp;
	if (!hw_pp || !hw_pp->ops.get_line_count)
		return -EINVAL;
	return hw_pp->ops.get_line_count(hw_pp);
}

static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
		return;
	}

	if (phys_enc->has_intf_te) {
		DRM_DEBUG_KMS("id:%u intf:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_intf->idx - INTF_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_intf->ops.disable_tearcheck)
			phys_enc->hw_intf->ops.disable_tearcheck(phys_enc->hw_intf);
	} else {
		if (!phys_enc->hw_pp) {
			DPU_ERROR("invalid encoder\n");
			return;
		}

		DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_pp->idx - PINGPONG_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_pp->ops.disable_tearcheck)
			phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
	}

	if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				PINGPONG_NONE);

		ctl = phys_enc->hw_ctl;
		ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
	}

	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_cmd_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int ret;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(&phys_enc->pending_kickoff_cnt));

	/*
	 * Mark the kickoff request as outstanding. If there is more than one
	 * outstanding request, wait for the previous one to complete.
	 */
	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (ret) {
		/* force pending_kickoff_cnt 0 to discard failed kickoff */
		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
			  DRMID(phys_enc->parent), ret,
			  phys_enc->hw_pp->idx - PINGPONG_0);
	}

	dpu_encoder_phys_cmd_enable_te(phys_enc);

	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(&phys_enc->pending_kickoff_cnt));
}

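/*
 * Despite its name, this helper only makes sure hardware autorefresh is
 * disabled, so that frame updates are driven by explicit kickoffs rather
 * than by the autorefresh engine.
 */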
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
{
	if (!phys_enc)
		return;
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf->ops.disable_autorefresh)
			return;

		phys_enc->hw_intf->ops.disable_autorefresh(
				phys_enc->hw_intf,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	} else {
		if (!phys_enc->hw_pp ||
		    !phys_enc->hw_pp->ops.disable_autorefresh)
			return;

		phys_enc->hw_pp->ops.disable_autorefresh(
				phys_enc->hw_pp,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	}
}

static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_CTL_START],
			dpu_encoder_phys_cmd_ctl_start_irq,
			&wait_info);
	if (ret == -ETIMEDOUT) {
		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
		ret = -EINVAL;
	} else if (!ret)
		ret = 0;

	return ret;
}

static int dpu_encoder_phys_cmd_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	int rc;

	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (rc) {
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
			  DRMID(phys_enc->parent), rc,
			  phys_enc->hw_intf->idx - INTF_0);
	}

	return rc;
}

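/*
 * For the very first frame the CTL has not been started yet, so wait for
 * CTL_START; once the CTL reports itself started, wait for the frame
 * transfer (PP_DONE) instead.
 */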
static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return 0;

	if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
		return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);

	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}

static void dpu_encoder_phys_cmd_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * re-enable external TE, either for the first time after enabling
	 * or if disabled for Autorefresh
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}

static void dpu_encoder_phys_cmd_trigger_start(
		struct dpu_encoder_phys *phys_enc)
{
	dpu_encoder_helper_trigger_start(phys_enc);
}

static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
	ops->enable = dpu_encoder_phys_cmd_enable;
	ops->disable = dpu_encoder_phys_cmd_disable;
	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
	ops->irq_enable = dpu_encoder_phys_cmd_irq_enable;
	ops->irq_disable = dpu_encoder_phys_cmd_irq_disable;
	ops->restore = dpu_encoder_phys_cmd_enable_helper;
	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}

/**
 * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
 * @dev:  Corresponding device for devres management
 * @p:	Pointer to init params structure
 * Return: Error code or newly allocated encoder
 */
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;
	struct dpu_encoder_phys_cmd *cmd_enc = NULL;

	DPU_DEBUG("intf\n");

	cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
	if (!cmd_enc) {
		DPU_ERROR("failed to allocate\n");
		return ERR_PTR(-ENOMEM);
	}
	phys_enc = &cmd_enc->base;

	dpu_encoder_phys_init(phys_enc, p);

	mutex_init(&phys_enc->vblank_ctl_lock);
	phys_enc->vblank_refcount = 0;

	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
	phys_enc->intf_mode = INTF_MODE_CMD;
	cmd_enc->stream_sel = 0;

	if (!phys_enc->hw_intf) {
		DPU_ERROR_CMDENC(cmd_enc, "no INTF provided\n");
		return ERR_PTR(-EINVAL);
	}

	/* DPU versions before 5.0 use the PINGPONG block for TE handling */
	if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
		phys_enc->has_intf_te = true;

	if (phys_enc->has_intf_te && !phys_enc->hw_intf->ops.enable_tearcheck) {
		DPU_ERROR_CMDENC(cmd_enc, "tearcheck not supported\n");
		return ERR_PTR(-EINVAL);
	}

	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
	init_waitqueue_head(&cmd_enc->pending_vblank_wq);

	DPU_DEBUG_CMDENC(cmd_enc, "created\n");

	return phys_enc;
}