// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#include <drm/drm_managed.h>

#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_cmd(x) \
	container_of(x, struct dpu_encoder_phys_cmd, base)

#define PP_TIMEOUT_MAX_TRIALS	10

/*
 * Tearcheck sync start and continue thresholds are empirically found
 * based on common panels. In the future, we may want to allow panels to
 * override these default values.
 */
#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4

static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);

static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
	return (phys_enc->split_role != ENC_ROLE_SLAVE);
}

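/*
 * _dpu_encoder_phys_cmd_update_intf_cfg - program the CTL and INTF blocks
 *	for command mode operation: interface/stream selection, 3D blend
 *	mode, DSC, pingpong binding and wide-bus configuration.
 */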
static void _dpu_encoder_phys_cmd_update_intf_cfg(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = {};

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.setup_intf_cfg)
		return;

	intf_cfg.intf = phys_enc->hw_intf->idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
	intf_cfg.stream_sel = cmd_enc->stream_sel;
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);

	/* setup which pp blk will connect to this intf */
	if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
	    phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				phys_enc->hw_pp->idx);

	if (intf_cfg.dsc != 0)
		cmd_mode_cfg.data_compress = true;

	cmd_mode_cfg.wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);

	if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
		phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
}

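/*
 * dpu_encoder_phys_cmd_pp_tx_done_irq - pingpong "frame done" interrupt
 *	handler: signals frame-done to the parent encoder, decrements the
 *	pending kickoff count and wakes any thread waiting on a kickoff.
 */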
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	unsigned long lock_flags;
	int new_cnt;
	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("pp_done_irq");
	/* notify all synchronous clients first, then asynchronous clients */
	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  new_cnt, event);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("pp_done_irq");
}

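/*
 * dpu_encoder_phys_cmd_te_rd_ptr_irq - read-pointer (TE) interrupt handler:
 *	forwards the vblank event to the parent encoder and releases any
 *	waiters on the pending vblank queue.
 */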
static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_encoder_phys_cmd *cmd_enc;

	DPU_ATRACE_BEGIN("rd_ptr_irq");
	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);

	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
	wake_up_all(&cmd_enc->pending_vblank_wq);
	DPU_ATRACE_END("rd_ptr_irq");
}

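/*
 * dpu_encoder_phys_cmd_ctl_start_irq - CTL start interrupt handler:
 *	decrements the pending ctl-start count and wakes threads blocked in
 *	_dpu_encoder_phys_cmd_wait_for_ctl_start().
 */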
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	DPU_ATRACE_BEGIN("ctl_start_irq");

	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any waiting ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("ctl_start_irq");
}

static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
{
	struct dpu_encoder_phys *phys_enc = arg;

	dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}

static void dpu_encoder_phys_cmd_atomic_mode_set(
		struct dpu_encoder_phys *phys_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;

	phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;

	if (phys_enc->has_intf_te)
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
	else
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;

	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}

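/*
 * _dpu_encoder_phys_cmd_handle_ppdone_timeout - handle a pingpong done
 *	timeout: log it (rate-limited), capture a display snapshot, flag the
 *	panel as dead after PP_TIMEOUT_MAX_TRIALS and request a CTL reset
 *	before the next kickoff.
 */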
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
	bool do_log = false;
	struct drm_encoder *drm_enc;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	drm_enc = phys_enc->parent;

	cmd_enc->pp_timeout_report_cnt++;
	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
		do_log = true;
	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
		do_log = true;
	}

	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
		     phys_enc->hw_pp->idx - PINGPONG_0,
		     cmd_enc->pp_timeout_report_cnt,
		     atomic_read(&phys_enc->pending_kickoff_cnt),
		     frame_event);

	/* to avoid flooding, only log first time, and "dead" time */
	if (do_log) {
		DRM_ERROR("id:%d pp:%d ctl:%d kickoff timeout cnt:%d koff_cnt:%d\n",
			  DRMID(drm_enc),
			  phys_enc->hw_pp->idx - PINGPONG_0,
			  phys_enc->hw_ctl->idx - CTL_0,
			  cmd_enc->pp_timeout_report_cnt,
			  atomic_read(&phys_enc->pending_kickoff_cnt));
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);
	}

	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);

	/* request a ctl reset before the next kickoff */
	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);

	return -ETIMEDOUT;
}

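/*
 * _dpu_encoder_phys_cmd_wait_for_idle - wait for the pending kickoff count
 *	to drop to zero, i.e. for the previous frame transfer to complete.
 */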
static int _dpu_encoder_phys_cmd_wait_for_idle(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_PINGPONG],
			dpu_encoder_phys_cmd_pp_tx_done_irq,
			&wait_info);
	if (ret == -ETIMEDOUT)
		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
	else if (!ret)
		cmd_enc->pp_timeout_report_cnt = 0;

	return ret;
}

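/*
 * dpu_encoder_phys_cmd_control_vblank_irq - reference-counted enable and
 *	disable of the read-pointer (vblank) interrupt on the master encoder.
 */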
static int dpu_encoder_phys_cmd_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	mutex_lock(&phys_enc->vblank_ctl_lock);
	refcount = phys_enc->vblank_refcount;

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      enable ? "true" : "false", refcount);

	if (enable) {
		if (phys_enc->vblank_refcount == 0)
			ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_RDPTR],
					dpu_encoder_phys_cmd_te_rd_ptr_irq,
					phys_enc);
		if (!ret)
			phys_enc->vblank_refcount++;
	} else {
		if (phys_enc->vblank_refcount == 1)
			ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_RDPTR]);
		if (!ret)
			phys_enc->vblank_refcount--;
	}

end:
	mutex_unlock(&phys_enc->vblank_ctl_lock);
	if (ret) {
		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
			  enable ? "true" : "false", refcount);
	}

	return ret;
}

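/*
 * dpu_encoder_phys_cmd_irq_control - register or unregister the pingpong,
 *	underrun, vblank and (on the master encoder) ctl-start interrupt
 *	callbacks.
 */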
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
			phys_enc->hw_pp->idx - PINGPONG_0,
			enable, phys_enc->vblank_refcount);

	if (enable) {
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_PINGPONG],
				dpu_encoder_phys_cmd_pp_tx_done_irq,
				phys_enc);
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN],
				dpu_encoder_phys_cmd_underrun_irq,
				phys_enc);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);

		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_CTL_START],
					dpu_encoder_phys_cmd_ctl_start_irq,
					phys_enc);
	} else {
		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_CTL_START]);

		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN]);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_PINGPONG]);
	}
}

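/*
 * dpu_encoder_phys_cmd_tearcheck_config - configure the hardware tearcheck
 *	block (vsync counter, sync thresholds and read-pointer IRQ line)
 *	from the cached display mode and the "vsync" clock rate.
 */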
static void dpu_encoder_phys_cmd_tearcheck_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_tear_check tc_cfg = { 0 };
	struct drm_display_mode *mode;
	bool tc_enable = true;
	unsigned long vsync_hz;
	struct dpu_kms *dpu_kms;

	/*
	 * TODO: if/when resource allocation is refactored, move this to a
	 * place where the driver can actually return an error.
	 */
	if (!phys_enc->has_intf_te &&
	    (!phys_enc->hw_pp ||
	     !phys_enc->hw_pp->ops.enable_tearcheck)) {
		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "intf %d pp %d\n",
			 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
			 phys_enc->hw_pp ? phys_enc->hw_pp->idx - PINGPONG_0 : -1);

	mode = &phys_enc->cached_mode;

	dpu_kms = phys_enc->dpu_kms;

	/*
	 * TE default: DSI byte clock calculated based on 70 fps;
	 * around 14 ms to complete a kickoff cycle if TE is disabled;
	 * vclk_line based on 60 fps; write is faster than read;
	 * init == start == rdptr.
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
	 * LCD panel refresh rate, divided by the number of rows (lines) in
	 * the panel.
	 */
	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
	if (!vsync_hz) {
		DPU_DEBUG_CMDENC(cmd_enc, "invalid - no vsync clock\n");
		return;
	}

	tc_cfg.vsync_count = vsync_hz /
				(mode->vtotal * drm_mode_vrefresh(mode));

	/*
	 * Set the sync_cfg_height to twice vtotal so that if we lose a
	 * TE event coming from the display TE pin we won't stall immediately
	 */
	tc_cfg.hw_vsync_mode = 1;
	tc_cfg.sync_cfg_height = mode->vtotal * 2;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
	tc_cfg.start_pos = mode->vdisplay;
	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

	DPU_DEBUG_CMDENC(cmd_enc,
		"tc vsync_clk_speed_hz %lu vtotal %u vrefresh %u\n",
		vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc enable %u start_pos %u rd_ptr_irq %u\n",
		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
		tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
		tc_cfg.vsync_init_val);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc cfgheight %u thresh_start %u thresh_cont %u\n",
		tc_cfg.sync_cfg_height, tc_cfg.sync_threshold_start,
		tc_cfg.sync_threshold_continue);

	if (phys_enc->has_intf_te)
		phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
	else
		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);
}

static void _dpu_encoder_phys_cmd_pingpong_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
			phys_enc->hw_pp->idx - PINGPONG_0);
	drm_mode_debug_printmodeline(&phys_enc->cached_mode);

	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}

static bool dpu_encoder_phys_cmd_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * We do a separate flush for each CTL and let
	 * CTL_START synchronize them.
	 */
	return false;
}

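/*
 * dpu_encoder_phys_cmd_enable_helper - program split config, interface and
 *	tearcheck, then (on the master encoder) mark the interface flush as
 *	pending. Also used as the ->restore() op when coming back from idle
 *	power collapse.
 */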
static void dpu_encoder_phys_cmd_enable_helper(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);

	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	ctl = phys_enc->hw_ctl;
	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid phys encoder\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
		DPU_ERROR("already enabled\n");
		return;
	}

	dpu_encoder_phys_cmd_enable_helper(phys_enc);
	phys_enc->enable_state = DPU_ENC_ENABLED;
}

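/*
 * _dpu_encoder_phys_cmd_connect_te - connect or disconnect the external TE
 *	pin, using the INTF tearcheck block where available and the PINGPONG
 *	block otherwise.
 */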
static void _dpu_encoder_phys_cmd_connect_te(
		struct dpu_encoder_phys *phys_enc, bool enable)
{
	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf, enable);
	} else {
		if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
	}
}

static void dpu_encoder_phys_cmd_prepare_idle_pc(
		struct dpu_encoder_phys *phys_enc)
{
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}

static int dpu_encoder_phys_cmd_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pingpong *hw_pp;
	struct dpu_hw_intf *hw_intf;

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return -EINVAL;

	if (phys_enc->has_intf_te) {
		hw_intf = phys_enc->hw_intf;
		if (!hw_intf || !hw_intf->ops.get_line_count)
			return -EINVAL;
		return hw_intf->ops.get_line_count(hw_intf);
	}

	hw_pp = phys_enc->hw_pp;
	if (!hw_pp || !hw_pp->ops.get_line_count)
		return -EINVAL;
	return hw_pp->ops.get_line_count(hw_pp);
}

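/*
 * dpu_encoder_phys_cmd_disable - disable tearcheck, unbind the pingpong
 *	block from the interface and flush the change through the CTL.
 */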
static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
		to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
		return;
	}

	if (phys_enc->has_intf_te) {
		DRM_DEBUG_KMS("id:%u intf:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_intf->idx - INTF_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_intf->ops.disable_tearcheck)
			phys_enc->hw_intf->ops.disable_tearcheck(phys_enc->hw_intf);
	} else {
		if (!phys_enc->hw_pp) {
			DPU_ERROR("invalid encoder\n");
			return;
		}

		DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_pp->idx - PINGPONG_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_pp->ops.disable_tearcheck)
			phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
	}

	if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				PINGPONG_NONE);

		ctl = phys_enc->hw_ctl;
		ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
	}

	phys_enc->enable_state = DPU_ENC_DISABLED;
}

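/*
 * dpu_encoder_phys_cmd_prepare_for_kickoff - wait for any outstanding
 *	kickoff to complete before the new frame is triggered, discarding a
 *	failed kickoff on timeout, and make sure autorefresh is disabled.
 */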
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int ret;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(&phys_enc->pending_kickoff_cnt));

	/*
	 * Mark the kickoff request as outstanding. If there is more than one
	 * outstanding request, we have to wait for the previous kickoff to
	 * complete.
	 */
	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (ret) {
		/* force pending_kickoff_cnt 0 to discard failed kickoff */
		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
			  DRMID(phys_enc->parent), ret,
			  phys_enc->hw_pp->idx - PINGPONG_0);
	}

	dpu_encoder_phys_cmd_enable_te(phys_enc);

	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(&phys_enc->pending_kickoff_cnt));
}

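/*
 * dpu_encoder_phys_cmd_enable_te - on the master encoder, disable hardware
 *	autorefresh so that frame transfers are driven by explicit kickoffs.
 */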
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
{
	if (!phys_enc)
		return;
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf->ops.disable_autorefresh)
			return;

		phys_enc->hw_intf->ops.disable_autorefresh(
				phys_enc->hw_intf,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	} else {
		if (!phys_enc->hw_pp ||
		    !phys_enc->hw_pp->ops.disable_autorefresh)
			return;

		phys_enc->hw_pp->ops.disable_autorefresh(
				phys_enc->hw_pp,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	}
}

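/*
 * _dpu_encoder_phys_cmd_wait_for_ctl_start - wait for the CTL start
 *	interrupt that indicates the hardware has latched the new
 *	configuration.
 */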
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_CTL_START],
			dpu_encoder_phys_cmd_ctl_start_irq,
			&wait_info);
	if (ret == -ETIMEDOUT) {
		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
		ret = -EINVAL;
	}

	return ret;
}

static int dpu_encoder_phys_cmd_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	int rc;

	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (rc) {
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
			  DRMID(phys_enc->parent), rc,
			  phys_enc->hw_intf->idx - INTF_0);
	}

	return rc;
}

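/*
 * dpu_encoder_phys_cmd_wait_for_commit_done - on the master encoder, wait
 *	for the frame transfer to complete if the CTL is already running,
 *	otherwise wait for CTL start.
 */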
static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return 0;

	if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
		return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);

	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}

static void dpu_encoder_phys_cmd_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * Re-enable external TE, either for the first time after enabling
	 * or if it was disabled for autorefresh.
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}

static void dpu_encoder_phys_cmd_trigger_start(
		struct dpu_encoder_phys *phys_enc)
{
	dpu_encoder_helper_trigger_start(phys_enc);
}

static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
	ops->enable = dpu_encoder_phys_cmd_enable;
	ops->disable = dpu_encoder_phys_cmd_disable;
	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
	ops->restore = dpu_encoder_phys_cmd_enable_helper;
	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}

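/**
 * dpu_encoder_phys_cmd_init - construct a new command-mode physical encoder
 * @dev: corresponding device for devres management
 * @p: pointer to init params structure
 *
 * Return: error code or newly allocated encoder
 */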
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;
	struct dpu_encoder_phys_cmd *cmd_enc = NULL;

	DPU_DEBUG("intf\n");

	cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
	if (!cmd_enc) {
		DPU_ERROR("failed to allocate\n");
		return ERR_PTR(-ENOMEM);
	}
	phys_enc = &cmd_enc->base;

	dpu_encoder_phys_init(phys_enc, p);

	mutex_init(&phys_enc->vblank_ctl_lock);
	phys_enc->vblank_refcount = 0;

	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
	phys_enc->intf_mode = INTF_MODE_CMD;
	cmd_enc->stream_sel = 0;

	if (!phys_enc->hw_intf) {
		DPU_ERROR_CMDENC(cmd_enc, "no INTF provided\n");
		return ERR_PTR(-EINVAL);
	}

	/* DPU versions before 5.0 use the PINGPONG block for TE handling */
	if (phys_enc->dpu_kms->catalog->mdss_ver->core_major_ver >= 5)
		phys_enc->has_intf_te = true;

	if (phys_enc->has_intf_te && !phys_enc->hw_intf->ops.enable_tearcheck) {
		DPU_ERROR_CMDENC(cmd_enc, "tearcheck not supported\n");
		return ERR_PTR(-EINVAL);
	}

	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
	init_waitqueue_head(&cmd_enc->pending_vblank_wq);

	DPU_DEBUG_CMDENC(cmd_enc, "created\n");

	return phys_enc;
}