xref: /linux/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c (revision 4a57e0913e8c7fff407e97909f4ae48caa84d612) !
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
5  * Copyright (C) 2013 Red Hat
6  * Author: Rob Clark <robdclark@gmail.com>
7  */
8 
9 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
10 #include <linux/sort.h>
11 #include <linux/debugfs.h>
12 #include <linux/ktime.h>
13 #include <linux/bits.h>
14 
15 #include <drm/drm_atomic.h>
16 #include <drm/drm_blend.h>
17 #include <drm/drm_crtc.h>
18 #include <drm/drm_flip_work.h>
19 #include <drm/drm_framebuffer.h>
20 #include <drm/drm_mode.h>
21 #include <drm/drm_probe_helper.h>
22 #include <drm/drm_rect.h>
23 #include <drm/drm_vblank.h>
24 #include <drm/drm_self_refresh_helper.h>
25 
26 #include "dpu_kms.h"
27 #include "dpu_hw_lm.h"
28 #include "dpu_hw_ctl.h"
29 #include "dpu_hw_dspp.h"
30 #include "dpu_crtc.h"
31 #include "dpu_plane.h"
32 #include "dpu_encoder.h"
33 #include "dpu_vbif.h"
34 #include "dpu_core_perf.h"
35 #include "dpu_trace.h"
36 
37 /* layer mixer index on dpu_crtc */
38 #define LEFT_MIXER 0
39 #define RIGHT_MIXER 1
40 
41 /* timeout in ms waiting for frame done */
42 #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60
43 
44 #define	CONVERT_S3_15(val) \
45 	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
46 
47 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
48 {
49 	struct msm_drm_private *priv = crtc->dev->dev_private;
50 
51 	return to_dpu_kms(priv->kms);
52 }
53 
54 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
55 {
56 	struct drm_device *dev = crtc->dev;
57 	struct drm_encoder *encoder;
58 
59 	drm_for_each_encoder(encoder, dev)
60 		if (encoder->crtc == crtc)
61 			return encoder;
62 
63 	return NULL;
64 }
65 
66 static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
67 {
68 	if (!src_name ||
69 	    !strcmp(src_name, "none"))
70 		return DPU_CRTC_CRC_SOURCE_NONE;
71 	if (!strcmp(src_name, "auto") ||
72 	    !strcmp(src_name, "lm"))
73 		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
74 	if (!strcmp(src_name, "encoder"))
75 		return DPU_CRTC_CRC_SOURCE_ENCODER;
76 
77 	return DPU_CRTC_CRC_SOURCE_INVALID;
78 }
79 
80 static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
81 		const char *src_name, size_t *values_cnt)
82 {
83 	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
84 	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
85 
86 	if (source < 0) {
87 		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
88 		return -EINVAL;
89 	}
90 
91 	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
92 		*values_cnt = crtc_state->num_mixers;
93 	} else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
94 		struct drm_encoder *drm_enc;
95 
96 		*values_cnt = 0;
97 
98 		drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
99 			*values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
100 	}
101 
102 	return 0;
103 }
104 
105 static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
106 {
107 	struct dpu_crtc_mixer *m;
108 	int i;
109 
110 	for (i = 0; i < crtc_state->num_mixers; ++i) {
111 		m = &crtc_state->mixers[i];
112 
113 		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
114 			continue;
115 
116 		/* Calculate MISR over 1 frame */
117 		m->hw_lm->ops.setup_misr(m->hw_lm);
118 	}
119 }
120 
121 static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
122 {
123 	struct drm_encoder *drm_enc;
124 
125 	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
126 		dpu_encoder_setup_misr(drm_enc);
127 }
128 
/* debugfs callback: switch the active CRC source for this CRTC. */
static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	enum dpu_crtc_crc_source current_source;
	struct dpu_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;

	bool was_enabled;
	bool enable = false;
	int ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	/* crtc->state may only be dereferenced with the CRTC lock held */
	ret = drm_modeset_lock(&crtc->mutex, NULL);

	if (ret)
		return ret;

	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
	crtc_state = to_dpu_crtc_state(crtc->state);

	/* crc_source is also read from the vblank path under event_lock */
	spin_lock_irq(&drm_dev->event_lock);
	current_source = crtc_state->crc_source;
	spin_unlock_irq(&drm_dev->event_lock);

	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);

	/* hold a vblank reference for as long as CRC collection is active */
	if (!was_enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);

		if (ret)
			goto cleanup;

	} else if (was_enabled && !enable) {
		drm_crtc_vblank_put(crtc);
	}

	spin_lock_irq(&drm_dev->event_lock);
	crtc_state->crc_source = source;
	spin_unlock_irq(&drm_dev->event_lock);

	/* restart the warm-up frame skip for the new source */
	crtc_state->crc_frame_skip_count = 0;

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		dpu_crtc_setup_lm_misr(crtc_state);
	else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
		dpu_crtc_setup_encoder_misr(crtc);
	else
		/*
		 * NOTE(review): this branch is also reached for
		 * DPU_CRTC_CRC_SOURCE_NONE, so disabling CRC returns -EINVAL
		 * even though the disable itself took effect — confirm this
		 * is intended.
		 */
		ret = -EINVAL;

cleanup:
	drm_modeset_unlock(&crtc->mutex);

	return ret;
}
187 
188 static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
189 {
190 	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);
191 	if (!encoder) {
192 		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
193 		return 0;
194 	}
195 
196 	return dpu_encoder_get_vsync_count(encoder);
197 }
198 
199 static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
200 		struct dpu_crtc_state *crtc_state)
201 {
202 	struct dpu_crtc_mixer *m;
203 	u32 crcs[CRTC_DUAL_MIXERS];
204 
205 	int rc = 0;
206 	int i;
207 
208 	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));
209 
210 	for (i = 0; i < crtc_state->num_mixers; ++i) {
211 
212 		m = &crtc_state->mixers[i];
213 
214 		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
215 			continue;
216 
217 		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
218 
219 		if (rc) {
220 			if (rc != -ENODATA)
221 				DRM_DEBUG_DRIVER("MISR read failed\n");
222 			return rc;
223 		}
224 	}
225 
226 	return drm_crtc_add_crc_entry(crtc, true,
227 			drm_crtc_accurate_vblank_count(crtc), crcs);
228 }
229 
230 static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
231 {
232 	struct drm_encoder *drm_enc;
233 	int rc, pos = 0;
234 	u32 crcs[INTF_MAX];
235 
236 	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
237 		rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
238 		if (rc < 0) {
239 			if (rc != -ENODATA)
240 				DRM_DEBUG_DRIVER("MISR read failed\n");
241 
242 			return rc;
243 		}
244 
245 		pos += rc;
246 	}
247 
248 	return drm_crtc_add_crc_entry(crtc, true,
249 			drm_crtc_accurate_vblank_count(crtc), crcs);
250 }
251 
252 static int dpu_crtc_get_crc(struct drm_crtc *crtc)
253 {
254 	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
255 
256 	/* Skip first 2 frames in case of "uncooked" CRCs */
257 	if (crtc_state->crc_frame_skip_count < 2) {
258 		crtc_state->crc_frame_skip_count++;
259 		return 0;
260 	}
261 
262 	if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
263 		return dpu_crtc_get_lm_crc(crtc, crtc_state);
264 	else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
265 		return dpu_crtc_get_encoder_crc(crtc);
266 
267 	return -EINVAL;
268 }
269 
/*
 * Report the current scanout position of this CRTC relative to the start
 * of the active region (negative while in the blanking area). Only the
 * vertical position is tracked; *hpos is always 0.
 */
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	/* vertical sync width and back porch, in lines */
	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	/*
	 * NOTE(review): the first and last branches are identical; only the
	 * "past vactive_end" case gets special handling — confirm intended.
	 */
	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}
321 
322 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
323 				      struct dpu_plane_state *pstate,
324 				      const struct msm_format *format,
325 				      const struct dpu_mdss_version *mdss_ver)
326 {
327 	struct dpu_hw_mixer *lm = mixer->hw_lm;
328 	u32 blend_op;
329 	u32 fg_alpha, bg_alpha;
330 
331 	fg_alpha = pstate->base.alpha;
332 
333 	/* default to opaque blending */
334 	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
335 	    !format->alpha_enable) {
336 		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
337 			DPU_BLEND_BG_ALPHA_BG_CONST;
338 		bg_alpha = DRM_BLEND_ALPHA_OPAQUE - fg_alpha;
339 	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
340 		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
341 			DPU_BLEND_BG_ALPHA_FG_PIXEL;
342 		if (fg_alpha != DRM_BLEND_ALPHA_OPAQUE) {
343 			bg_alpha = fg_alpha;
344 			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
345 				    DPU_BLEND_BG_INV_MOD_ALPHA;
346 		} else {
347 			bg_alpha = 0;
348 			blend_op |= DPU_BLEND_BG_INV_ALPHA;
349 		}
350 	} else {
351 		/* coverage blending */
352 		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
353 			DPU_BLEND_BG_ALPHA_FG_PIXEL;
354 		if (fg_alpha != DRM_BLEND_ALPHA_OPAQUE) {
355 			bg_alpha = fg_alpha;
356 			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
357 				    DPU_BLEND_FG_INV_MOD_ALPHA |
358 				    DPU_BLEND_BG_MOD_ALPHA |
359 				    DPU_BLEND_BG_INV_MOD_ALPHA;
360 		} else {
361 			bg_alpha = 0;
362 			blend_op |= DPU_BLEND_BG_INV_ALPHA;
363 		}
364 	}
365 
366 	lm->ops.setup_blend_config(lm, pstate->stage,
367 				fg_alpha, bg_alpha, blend_op);
368 
369 	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
370 		  &format->pixel_format, format->alpha_enable, blend_op);
371 }
372 
373 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
374 {
375 	struct dpu_crtc_state *crtc_state;
376 	int lm_idx;
377 
378 	crtc_state = to_dpu_crtc_state(crtc->state);
379 
380 	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
381 		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
382 		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
383 		struct dpu_hw_mixer_cfg cfg;
384 
385 		if (!lm_roi || !drm_rect_visible(lm_roi))
386 			continue;
387 
388 		cfg.out_width = drm_rect_width(lm_roi);
389 		cfg.out_height = drm_rect_height(lm_roi);
390 		cfg.right_mixer = lm_idx & 0x1;
391 		cfg.flags = 0;
392 		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
393 	}
394 }
395 
/*
 * Record one SW pipe of a plane in the stage configuration and mark the
 * pipe's SSPP for flush on every CTL of the affected mixer pair.
 */
static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
				       struct drm_plane *plane,
				       struct dpu_crtc_mixer *mixer,
				       u32 lms_in_pair,
				       enum dpu_stage stage,
				       const struct msm_format *format,
				       uint64_t modifier,
				       struct dpu_sw_pipe *pipe,
				       unsigned int stage_idx,
				       struct dpu_hw_stage_cfg *stage_cfg
				      )
{
	u32 lm_idx;
	enum dpu_sspp sspp_idx;
	struct drm_plane_state *state;

	sspp_idx = pipe->sspp->idx;

	state = plane->state;

	trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
				   state, to_dpu_plane_state(state), stage_idx,
				   format->pixel_format, pipe,
				   modifier);

	DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n",
			 crtc->base.id,
			 stage,
			 plane->base.id,
			 sspp_idx - SSPP_NONE,
			 state->fb ? state->fb->base.id : -1,
			 pipe->multirect_index);

	/* remember which SSPP feeds this (stage, slot) position */
	stage_cfg->stage[stage][stage_idx] = sspp_idx;
	stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index;

	/* blend config update */
	for (lm_idx = 0; lm_idx < lms_in_pair; lm_idx++)
		mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx);
}
436 
/*
 * Walk every visible plane of the CRTC, fill the per-stage pipe
 * configuration and per-mixer blend settings, then program the CTL's
 * active fetch/pipe bitmaps and the mixer output ROIs.
 */
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
	struct dpu_hw_stage_cfg *stage_cfg)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	const struct msm_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	u32 lm_idx, stage, i, pipe_idx, head_pipe_in_stage, lms_in_pair;
	bool bg_alpha_enable = false;
	/* bitmaps of all SSPPs used by any plane on this CRTC */
	DECLARE_BITMAP(active_fetch, SSPP_MAX);
	DECLARE_BITMAP(active_pipes, SSPP_MAX);

	memset(active_fetch, 0, sizeof(active_fetch));
	memset(active_pipes, 0, sizeof(active_pipes));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		if (!state->visible)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		format = msm_framebuffer_format(pstate->base.fb);

		/* an alpha-capable base stage forces later opaque overrides */
		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		/* loop pipe per mixer pair with config in stage structure */
		for (stage = 0; stage < STAGES_PER_PLANE; stage++) {
			head_pipe_in_stage = stage * PIPES_PER_STAGE;
			for (i = 0; i < PIPES_PER_STAGE; i++) {
				pipe_idx = i + head_pipe_in_stage;
				if (!pstate->pipe[pipe_idx].sspp)
					continue;
				/* last pair may cover fewer mixers */
				lms_in_pair = min(cstate->num_mixers - (stage * PIPES_PER_STAGE),
						  PIPES_PER_STAGE);
				set_bit(pstate->pipe[pipe_idx].sspp->idx, active_fetch);
				set_bit(pstate->pipe[pipe_idx].sspp->idx, active_pipes);
				_dpu_crtc_blend_setup_pipe(crtc, plane,
							   &mixer[head_pipe_in_stage],
							   lms_in_pair,
							   pstate->stage,
							   format, fb ? fb->modifier : 0,
							   &pstate->pipe[pipe_idx], i,
							   &stage_cfg[stage]);
			}
		}

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format,
						  ctl->mdss_ver);

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_fetch_pipes)
		ctl->ops.set_active_fetch_pipes(ctl, active_fetch);

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, active_pipes);

	_dpu_crtc_program_lm_output_roi(crtc);
}
513 
514 /**
515  * _dpu_crtc_blend_setup - configure crtc mixers
516  * @crtc: Pointer to drm crtc structure
517  */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_stage_cfg stage_cfg[STAGES_PER_PLANE];
	DECLARE_BITMAP(active_lms, LM_MAX);
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	/* reset all mixers/CTLs to a clean state before reprogramming */
	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
		if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
			mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL);
		if (mixer[i].lm_ctl->ops.set_active_pipes)
			mixer[i].lm_ctl->ops.set_active_pipes(mixer[i].lm_ctl, NULL);

		/* blend stages may be cleared via the CTL or the LM itself */
		if (mixer[i].hw_lm->ops.clear_all_blendstages)
			mixer[i].hw_lm->ops.clear_all_blendstages(mixer[i].hw_lm);
	}

	/* initialize stage cfg */
	memset(&stage_cfg, 0, sizeof(stage_cfg));
	memset(active_lms, 0, sizeof(active_lms));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, stage_cfg);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		/* stage config flush mask */
		ctl->ops.update_pending_flush_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* accumulate LMs; the bitmap grows as the loop proceeds */
		set_bit(lm->idx, active_lms);
		if (ctl->ops.set_active_lms)
			ctl->ops.set_active_lms(ctl, active_lms);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0);

		/*
		 * call dpu_hw_ctl_setup_blendstage() to blend layers per stage cfg.
		 * stage data is shared between PIPES_PER_STAGE pipes.
		 */
		if (ctl->ops.setup_blendstage)
			ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
				&stage_cfg[i / PIPES_PER_STAGE]);

		if (lm->ops.setup_blendstage)
			lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx,
				&stage_cfg[i / PIPES_PER_STAGE]);
	}
}
583 
584 /**
585  *  _dpu_crtc_complete_flip - signal pending page_flip events
586  * Any pending vblank events are added to the vblank_event_list
587  * so that the next vblank interrupt shall signal them.
588  * However PAGE_FLIP events are not handled through the vblank_event_list.
589  * This API signals any pending PAGE_FLIP events requested through
590  * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
591  * @crtc: Pointer to drm crtc structure
592  */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	/* dpu_crtc->event is cached/cleared under dev->event_lock */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		/* consumed: prevent double delivery */
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
609 
610 /**
611  * dpu_crtc_get_intf_mode - get interface mode of the given crtc
 * @crtc: Pointer to crtc
613  */
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	/* no encoder attached to this CRTC */
	return INTF_MODE_NONE;
}
635 
636 /**
637  * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
638  * @crtc: Pointer to drm crtc object
639  */
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	/* first callback after reset records the timestamp, later ones count */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;

	/* collect a CRC for this frame if a source is configured */
	dpu_crtc_get_crc(crtc);

	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
655 
/*
 * Worker run on the per-CRTC event thread: process one queued frame event,
 * release bandwidth when the last pending frame completes, then return the
 * event container to the free list.
 */
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		/* DONE and ERROR both unblock waiters; PANEL_DEAD does not */
		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	/* recycle the event container onto the free list */
	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}
703 
704 /**
705  * dpu_crtc_frame_event_cb - crtc frame event callback API
706  * @crtc: Pointer to crtc
707  * @event: Event to process
708  *
709  * Encoder may call this for different events from different context - IRQ,
710  * user thread, commit_thread, etc. Each event should be carefully reviewed and
 * should be processed in proper task context to avoid scheduling delay or
712  * properly manage the irq context's bottom half processing.
713  */
void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
{
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	/* grab a pre-allocated event container from the free list */
	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	/* free list exhausted: event is dropped, only logged */
	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	/* defer the actual processing to the per-CRTC event thread */
	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->kms->event_thread[crtc_id].worker, &fevent->work);
}
749 
750 /**
751  * dpu_crtc_complete_commit - callback signalling completion of current commit
752  * @crtc: Pointer to drm crtc object
753  */
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	/* update perf bookkeeping, then deliver any cached page-flip event */
	dpu_core_perf_crtc_update(crtc, 0);
	_dpu_crtc_complete_flip(crtc);
}
760 
761 static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
762 		struct drm_crtc_state *state)
763 {
764 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
765 	struct drm_display_mode *adj_mode = &state->adjusted_mode;
766 	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
767 	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
768 	int i;
769 
770 	/* if we cannot merge 2 LMs (no 3d mux) better to fail earlier
771 	 * before even checking the width after the split
772 	 */
773 	if (!dpu_kms->catalog->caps->has_3d_merge &&
774 	    adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
775 		return -E2BIG;
776 
777 	for (i = 0; i < cstate->num_mixers; i++) {
778 		struct drm_rect *r = &cstate->lm_bounds[i];
779 		r->x1 = crtc_split_width * i;
780 		r->y1 = 0;
781 		r->x2 = r->x1 + crtc_split_width;
782 		r->y2 = adj_mode->vdisplay;
783 
784 		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
785 
786 		if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
787 			return -E2BIG;
788 	}
789 
790 	return 0;
791 }
792 
793 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
794 		struct dpu_hw_pcc_cfg *cfg)
795 {
796 	struct drm_color_ctm *ctm;
797 
798 	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
799 
800 	ctm = (struct drm_color_ctm *)state->ctm->data;
801 
802 	if (!ctm)
803 		return;
804 
805 	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
806 	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
807 	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
808 
809 	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
810 	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
811 	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
812 
813 	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
814 	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
815 	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
816 }
817 
818 static void _dpu_crtc_get_gc_lut(struct drm_crtc_state *state,
819 		struct dpu_hw_gc_lut *gc_lut)
820 {
821 	struct drm_color_lut *lut;
822 	int i;
823 	u32 val_even, val_odd;
824 
825 	lut = (struct drm_color_lut *)state->gamma_lut->data;
826 
827 	if (!lut)
828 		return;
829 
830 	/* Pack 1024 10-bit entries in 512 32-bit registers */
831 	for (i = 0; i < PGC_TBL_LEN; i++) {
832 		val_even = drm_color_lut_extract(lut[i * 2].green, 10);
833 		val_odd = drm_color_lut_extract(lut[i * 2 + 1].green, 10);
834 		gc_lut->c0[i] = val_even | (val_odd << 16);
835 		val_even = drm_color_lut_extract(lut[i * 2].blue, 10);
836 		val_odd = drm_color_lut_extract(lut[i * 2 + 1].blue, 10);
837 		gc_lut->c1[i] = val_even | (val_odd << 16);
838 		val_even = drm_color_lut_extract(lut[i * 2].red, 10);
839 		val_odd = drm_color_lut_extract(lut[i * 2 + 1].red, 10);
840 		gc_lut->c2[i] = val_even | (val_odd << 16);
841 	}
842 
843 	/* Disable 8-bit rounding mode */
844 	gc_lut->flags = 0;
845 }
846 
/*
 * Program the DSPP color-processing blocks (PCC matrix and gamma LUT)
 * from the CRTC's color management properties, flagging each touched
 * block for flush on its CTL.
 */
static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_gc_lut *gc_lut;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;


	/* nothing to do unless color props changed or a modeset reprograms all */
	if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		/* not every mixer has a DSPP attached */
		if (!dspp)
			continue;

		if (dspp->ops.setup_pcc) {
			/* NULL config disables the PCC block */
			if (!state->ctm) {
				dspp->ops.setup_pcc(dspp, NULL);
			} else {
				_dpu_crtc_get_pcc_coeff(state, &cfg);
				dspp->ops.setup_pcc(dspp, &cfg);
			}

			/* stage config flush mask */
			ctl->ops.update_pending_flush_dspp(ctl,
				mixer[i].hw_dspp->idx, DPU_DSPP_PCC);
		}

		if (dspp->ops.setup_gc) {
			/* NULL LUT disables gamma correction */
			if (!state->gamma_lut) {
				dspp->ops.setup_gc(dspp, NULL);
			} else {
				/* table is too large for the stack */
				gc_lut = kzalloc_obj(*gc_lut);
				if (!gc_lut)
					continue;
				_dpu_crtc_get_gc_lut(state, gc_lut);
				dspp->ops.setup_gc(dspp, gc_lut);
				kfree(gc_lut);
			}

			/* stage config flush mask */
			ctl->ops.update_pending_flush_dspp(ctl,
				mixer[i].hw_dspp->idx, DPU_DSPP_GC);
		}
	}
}
900 
/*
 * Atomic begin hook: set up mixer bounds, blending, and color processing
 * ahead of the plane updates for this commit.
 */
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}
941 
/*
 * Atomic flush hook: cache the pending vblank event, update performance
 * settings, and give every plane a final chance to flush before kickoff.
 */
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->kms->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	/* take ownership of the state's event; delivered in complete_flip */
	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers has been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}
1000 
1001 /**
1002  * dpu_crtc_destroy_state - state destroy hook
1003  * @crtc: drm CRTC
1004  * @state: CRTC state object to release
1005  */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	/* release embedded core state first, then the driver wrapper */
	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}
1017 
1018 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
1019 {
1020 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1021 	int ret, rc = 0;
1022 
1023 	if (!atomic_read(&dpu_crtc->frame_pending)) {
1024 		DRM_DEBUG_ATOMIC("no frames pending\n");
1025 		return 0;
1026 	}
1027 
1028 	DPU_ATRACE_BEGIN("frame done completion wait");
1029 	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
1030 			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
1031 	if (!ret) {
1032 		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
1033 		rc = -ETIMEDOUT;
1034 	}
1035 	DPU_ATRACE_END("frame done completion wait");
1036 
1037 	return rc;
1038 }
1039 
1040 static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc)
1041 {
1042 	struct drm_encoder *encoder;
1043 	struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL;
1044 	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
1045 
1046 	/* Find encoder for real time display */
1047 	drm_for_each_encoder_mask(encoder, crtc->dev,
1048 				  crtc->state->encoder_mask) {
1049 		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
1050 			wb_encoder = encoder;
1051 		else
1052 			rt_encoder = encoder;
1053 	}
1054 
1055 	if (!rt_encoder || !wb_encoder) {
1056 		DRM_DEBUG_ATOMIC("real time or wb encoder not found\n");
1057 		return -EINVAL;
1058 	}
1059 
1060 	dpu_encoder_prepare_for_kickoff(wb_encoder);
1061 	dpu_encoder_prepare_for_kickoff(rt_encoder);
1062 
1063 	dpu_vbif_clear_errors(dpu_kms);
1064 
1065 	/*
1066 	 * Kickoff real time encoder last as it's the encoder that
1067 	 * will do the flush
1068 	 */
1069 	dpu_encoder_kickoff(wb_encoder);
1070 	dpu_encoder_kickoff(rt_encoder);
1071 
1072 	/* Don't start frame done timers until the kickoffs have finished */
1073 	dpu_encoder_start_frame_done_timer(wb_encoder);
1074 	dpu_encoder_start_frame_done_timer(rt_encoder);
1075 
1076 	return 0;
1077 }
1078 
1079 /**
1080  * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
1081  * @crtc: Pointer to drm crtc object
1082  */
1083 void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
1084 {
1085 	struct drm_encoder *encoder;
1086 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1087 	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
1088 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
1089 
1090 	/*
1091 	 * If no mixers has been allocated in dpu_crtc_atomic_check(),
1092 	 * it means we are trying to start a CRTC whose state is disabled:
1093 	 * nothing else needs to be done.
1094 	 */
1095 	if (unlikely(!cstate->num_mixers))
1096 		return;
1097 
1098 	DPU_ATRACE_BEGIN("crtc_commit");
1099 
1100 	drm_for_each_encoder_mask(encoder, crtc->dev,
1101 			crtc->state->encoder_mask) {
1102 		if (!dpu_encoder_is_valid_for_commit(encoder)) {
1103 			DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
1104 			goto end;
1105 		}
1106 	}
1107 
1108 	if (drm_crtc_in_clone_mode(crtc->state)) {
1109 		if (dpu_crtc_kickoff_clone_mode(crtc))
1110 			goto end;
1111 	} else {
1112 		/*
1113 		 * Encoder will flush/start now, unless it has a tx pending.
1114 		 * If so, it may delay and flush at an irq event (e.g. ppdone)
1115 		 */
1116 		drm_for_each_encoder_mask(encoder, crtc->dev,
1117 				crtc->state->encoder_mask)
1118 			dpu_encoder_prepare_for_kickoff(encoder);
1119 
1120 		dpu_vbif_clear_errors(dpu_kms);
1121 
1122 		drm_for_each_encoder_mask(encoder, crtc->dev,
1123 				crtc->state->encoder_mask) {
1124 			dpu_encoder_kickoff(encoder);
1125 			dpu_encoder_start_frame_done_timer(encoder);
1126 		}
1127 	}
1128 
1129 	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
1130 		/* acquire bandwidth and other resources */
1131 		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
1132 	} else
1133 		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
1134 
1135 	dpu_crtc->play_count++;
1136 
1137 	reinit_completion(&dpu_crtc->frame_done_comp);
1138 
1139 end:
1140 	DPU_ATRACE_END("crtc_commit");
1141 }
1142 
1143 static void dpu_crtc_reset(struct drm_crtc *crtc)
1144 {
1145 	struct dpu_crtc_state *cstate = kzalloc_obj(*cstate);
1146 
1147 	if (crtc->state)
1148 		dpu_crtc_destroy_state(crtc, crtc->state);
1149 
1150 	if (cstate)
1151 		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
1152 	else
1153 		__drm_atomic_helper_crtc_reset(crtc, NULL);
1154 }
1155 
1156 /**
1157  * dpu_crtc_duplicate_state - state duplicate hook
1158  * @crtc: Pointer to drm crtc structure
1159  */
1160 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
1161 {
1162 	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
1163 
1164 	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
1165 	if (!cstate) {
1166 		DPU_ERROR("failed to allocate state\n");
1167 		return NULL;
1168 	}
1169 
1170 	/* duplicate base helper */
1171 	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
1172 
1173 	return &cstate->base;
1174 }
1175 
1176 static void dpu_crtc_atomic_print_state(struct drm_printer *p,
1177 					const struct drm_crtc_state *state)
1178 {
1179 	const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
1180 	int i;
1181 
1182 	for (i = 0; i < cstate->num_mixers; i++) {
1183 		drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
1184 		drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
1185 		if (cstate->mixers[i].hw_dspp)
1186 			drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
1187 	}
1188 }
1189 
/*
 * drm_crtc_helper_funcs::atomic_disable - quiesce the crtc: detach
 * encoders, drain pending frames, drop bandwidth votes and release the
 * runtime PM reference taken in dpu_crtc_enable().
 */
static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* If disable is triggered while in self refresh mode,
	 * reset the encoder software state so that in enable
	 * it won't trigger a warn while assigning crtc.
	 */
	if (old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					old_crtc_state->encoder_mask) {
			dpu_encoder_assign_crtc(encoder, NULL);
		}
		return;
	}

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;

		/*
		 * If disable is triggered during psr active(e.g: screen dim in PSR),
		 * we will need encoder->crtc connection to process the device sleep &
		 * preserve it during psr sequence.
		 */
		if (!crtc->state->self_refresh_active)
			dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	/* drop the bandwidth held for frames that never reported done */
	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0);

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	/* flush a pending event so userspace is not left waiting on it */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	/* balances the pm_runtime_get_sync() in dpu_crtc_enable() */
	pm_runtime_put_sync(crtc->dev->dev);
}
1268 
1269 static void dpu_crtc_enable(struct drm_crtc *crtc,
1270 		struct drm_atomic_state *state)
1271 {
1272 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1273 	struct drm_encoder *encoder;
1274 	bool request_bandwidth = false;
1275 	struct drm_crtc_state *old_crtc_state;
1276 
1277 	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
1278 
1279 	pm_runtime_get_sync(crtc->dev->dev);
1280 
1281 	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
1282 
1283 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
1284 		/* in video mode, we hold an extra bandwidth reference
1285 		 * as we cannot drop bandwidth at frame-done if any
1286 		 * crtc is being used in video mode.
1287 		 */
1288 		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
1289 			request_bandwidth = true;
1290 	}
1291 
1292 	if (request_bandwidth)
1293 		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
1294 
1295 	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
1296 	dpu_crtc->enabled = true;
1297 
1298 	if (!old_crtc_state->self_refresh_active) {
1299 		drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
1300 			dpu_encoder_assign_crtc(encoder, crtc);
1301 	}
1302 
1303 	/* Enable/restore vblank irq handling */
1304 	drm_crtc_vblank_on(crtc);
1305 }
1306 
1307 static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
1308 {
1309 	struct drm_crtc *crtc = cstate->crtc;
1310 	struct drm_encoder *encoder;
1311 
1312 	if (cstate->self_refresh_active)
1313 		return true;
1314 
1315 	drm_for_each_encoder_mask (encoder, crtc->dev, cstate->encoder_mask) {
1316 		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) {
1317 			return true;
1318 		}
1319 	}
1320 
1321 	return false;
1322 }
1323 
1324 static int dpu_crtc_assign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
1325 {
1326 	int total_planes = crtc->dev->mode_config.num_total_plane;
1327 	struct drm_atomic_state *state = crtc_state->state;
1328 	struct dpu_global_state *global_state;
1329 	struct drm_plane_state **states;
1330 	struct drm_plane *plane;
1331 	int ret;
1332 
1333 	global_state = dpu_kms_get_global_state(crtc_state->state);
1334 	if (IS_ERR(global_state))
1335 		return PTR_ERR(global_state);
1336 
1337 	if (!crtc_state->enable)
1338 		return 0;
1339 
1340 	states = kzalloc_objs(*states, total_planes);
1341 	if (!states)
1342 		return -ENOMEM;
1343 
1344 	drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
1345 		struct drm_plane_state *plane_state =
1346 			drm_atomic_get_plane_state(state, plane);
1347 
1348 		if (IS_ERR(plane_state)) {
1349 			ret = PTR_ERR(plane_state);
1350 			goto done;
1351 		}
1352 
1353 		states[plane_state->normalized_zpos] = plane_state;
1354 	}
1355 
1356 	ret = dpu_assign_plane_resources(global_state, state, crtc, states, total_planes);
1357 
1358 done:
1359 	kfree(states);
1360 	return ret;
1361 }
1362 
1363 static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
1364 {
1365 	struct dpu_global_state *global_state;
1366 
1367 	global_state = dpu_kms_get_global_state(crtc_state->state);
1368 	if (IS_ERR(global_state))
1369 		return PTR_ERR(global_state);
1370 
1371 	dpu_rm_release_all_sspp(global_state, crtc);
1372 
1373 	return dpu_crtc_assign_planes(crtc, crtc_state);
1374 }
1375 
1376 #define MAX_CHANNELS_PER_CRTC PIPES_PER_PLANE
1377 #define MAX_HDISPLAY_SPLIT 1080
1378 
1379 static struct msm_display_topology dpu_crtc_get_topology(
1380 		struct drm_crtc *crtc,
1381 		struct dpu_kms *dpu_kms,
1382 		struct drm_crtc_state *crtc_state)
1383 {
1384 	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
1385 	struct msm_display_topology topology = {0};
1386 	struct drm_encoder *drm_enc;
1387 
1388 	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
1389 		dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
1390 					    &crtc_state->adjusted_mode);
1391 
1392 	topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state);
1393 
1394 	/*
1395 	 * Datapath topology selection
1396 	 *
1397 	 * Dual display
1398 	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
1399 	 *
1400 	 * Single display
1401 	 * 1 LM, 1 INTF
1402 	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
1403 	 *
1404 	 * If DSC is enabled, use 2 LMs for 2:2:1 topology
1405 	 *
1406 	 * Add dspps to the reservation requirements if ctm or gamma_lut are requested
1407 	 *
1408 	 * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not
1409 	 * enabled. This is because in cases where CWB is enabled, num_intf will
1410 	 * count both the WB and real-time phys encoders.
1411 	 *
1412 	 * For non-DSC CWB usecases, have the num_lm be decided by the
1413 	 * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
1414 	 */
1415 
1416 	if (topology.num_intf == 2 && !topology.cwb_enabled)
1417 		topology.num_lm = 2;
1418 	else if (topology.num_dsc == 2)
1419 		topology.num_lm = 2;
1420 	else if (dpu_kms->catalog->caps->has_3d_merge &&
1421 		 topology.num_dsc == 0)
1422 		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
1423 	else
1424 		topology.num_lm = 1;
1425 
1426 	if (crtc_state->ctm || crtc_state->gamma_lut)
1427 		topology.num_dspp = topology.num_lm;
1428 
1429 	return topology;
1430 }
1431 
/*
 * Reserve LM/CTL/DSPP hardware blocks for this crtc state via the
 * resource manager and record them in the dpu_crtc_state mixer array.
 * Previously held resources are always released first.
 */
static int dpu_crtc_assign_resources(struct drm_crtc *crtc,
				     struct drm_crtc_state *crtc_state)
{
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC];
	int i, num_lm, num_ctl, num_dspp;
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_global_state *global_state;
	struct dpu_crtc_state *cstate;
	struct msm_display_topology topology;
	int ret;

	/*
	 * Release and Allocate resources on every modeset
	 */
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	dpu_rm_release(global_state, crtc);

	/* a disabled crtc holds no resources */
	if (!crtc_state->enable)
		return 0;

	topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state);
	ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
			     crtc_state->crtc, &topology);
	if (ret)
		return ret;

	cstate = to_dpu_crtc_state(crtc_state);

	/* fetch what the resource manager just reserved for us */
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						crtc_state->crtc,
						DPU_HW_BLK_CTL, hw_ctl,
						ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
					       crtc_state->crtc,
					       DPU_HW_BLK_LM, hw_lm,
					       ARRAY_SIZE(hw_lm));
	num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						 crtc_state->crtc,
						 DPU_HW_BLK_DSPP, hw_dspp,
						 ARRAY_SIZE(hw_dspp));

	for (i = 0; i < num_lm; i++) {
		/* when fewer CTLs than LMs, the last CTL drives extra mixers */
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		if (i < num_dspp)
			cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	return 0;
}
1491 
1492 /**
1493  * dpu_crtc_check_mode_changed: check if full modeset is required
1494  * @old_crtc_state:	Previous CRTC state
1495  * @new_crtc_state:	Corresponding CRTC state to be checked
1496  *
1497  * Check if the changes in the object properties demand full mode set.
1498  */
1499 int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
1500 				struct drm_crtc_state *new_crtc_state)
1501 {
1502 	struct drm_encoder *drm_enc;
1503 	struct drm_crtc *crtc = new_crtc_state->crtc;
1504 	bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state);
1505 	bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state);
1506 
1507 	DRM_DEBUG_ATOMIC("%d\n", crtc->base.id);
1508 
1509 	/* there might be cases where encoder needs a modeset too */
1510 	drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) {
1511 		if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state))
1512 			new_crtc_state->mode_changed = true;
1513 	}
1514 
1515 	if ((clone_mode_requested && !clone_mode_enabled) ||
1516 	    (!clone_mode_requested && clone_mode_enabled))
1517 		new_crtc_state->mode_changed = true;
1518 
1519 	return 0;
1520 }
1521 
/*
 * drm_crtc_helper_funcs::atomic_check - validate the proposed crtc state:
 * (re)assign mixer and plane resources when needed, set up LM bounds,
 * propagate the dirty-fb requirement to planes and run the perf check.
 */
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	int rc = 0;

	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);

	/* don't reallocate resources if only ACTIVE has been changed */
	if (crtc_state->mode_changed || crtc_state->connectors_changed ||
	    crtc_state->color_mgmt_changed) {
		rc = dpu_crtc_assign_resources(crtc, crtc_state);
		if (rc < 0)
			return rc;
	}

	/* plane set or stacking order changed: redo plane-to-SSPP mapping */
	if (crtc_state->planes_changed || crtc_state->zpos_changed) {
		if (dpu_use_virtual_planes)
			rc = dpu_crtc_reassign_planes(crtc, crtc_state);
		else
			rc = dpu_crtc_assign_planes(crtc, crtc_state);
		if (rc < 0)
			return rc;
	}

	if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		/* clear stale perf data so nothing is voted for a dark crtc */
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		return 0;
	}

	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	if (cstate->num_mixers) {
		rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
		if (rc)
			return rc;
	}

	/* FIXME: move this to dpu_plane_atomic_check? */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			return rc;
		}

		if (!pstate->visible)
			continue;

		/* tell each visible plane whether dirty-fb calls are needed */
		dpu_pstate->needs_dirtyfb = needs_dirtyfb;
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		return rc;
	}

	return 0;
}
1598 
1599 static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
1600 						const struct drm_display_mode *mode)
1601 {
1602 	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
1603 	u64 adjusted_mode_clk;
1604 
1605 	/* if there is no 3d_mux block we cannot merge LMs so we cannot
1606 	 * split the large layer into 2 LMs, filter out such modes
1607 	 */
1608 	if (!dpu_kms->catalog->caps->has_3d_merge &&
1609 	    mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
1610 		return MODE_BAD_HVALUE;
1611 
1612 	adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
1613 							    dpu_kms->perf.perf_cfg);
1614 
1615 	if (dpu_kms->catalog->caps->has_3d_merge)
1616 		adjusted_mode_clk /= 2;
1617 
1618 	/*
1619 	 * The given mode, adjusted for the perf clock factor, should not exceed
1620 	 * the max core clock rate
1621 	 */
1622 	if (dpu_kms->perf.max_core_clk_rate < adjusted_mode_clk * 1000)
1623 		return MODE_CLOCK_HIGH;
1624 
1625 	/*
1626 	 * max crtc width is equal to the max mixer width * 2 and max height is 4K
1627 	 */
1628 	return drm_mode_validate_size(mode,
1629 				      2 * dpu_kms->catalog->caps->max_mixer_width,
1630 				      4096);
1631 }
1632 
1633 /**
1634  * dpu_crtc_vblank - enable or disable vblanks for this crtc
1635  * @crtc: Pointer to drm crtc object
1636  * @en: true to enable vblanks, false to disable
1637  */
1638 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
1639 {
1640 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1641 	struct drm_encoder *enc;
1642 
1643 	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
1644 
1645 	/*
1646 	 * Normally we would iterate through encoder_mask in crtc state to find
1647 	 * attached encoders. In this case, we might be disabling vblank _after_
1648 	 * encoder_mask has been cleared.
1649 	 *
1650 	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
1651 	 * disable (which is also after encoder_mask is cleared). So instead of
1652 	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
1653 	 * currently assigned to our crtc.
1654 	 *
1655 	 * Note also that this function cannot be called while crtc is disabled
1656 	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
1657 	 * about the assigned crtcs being inconsistent with the current state
1658 	 * (which means no need to worry about modeset locks).
1659 	 */
1660 	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
1661 		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
1662 					     dpu_crtc);
1663 
1664 		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
1665 	}
1666 
1667 	return 0;
1668 }
1669 
1670 /**
1671  * dpu_crtc_get_num_lm - Get mixer number in this CRTC pipeline
1672  * @state: Pointer to drm crtc state object
1673  */
1674 unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state)
1675 {
1676 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
1677 
1678 	return cstate->num_mixers;
1679 }
1680 
1681 #ifdef CONFIG_DEBUG_FS
/*
 * debugfs "status" file: dump the current mixer/plane configuration and
 * a vblank-derived FPS estimate for this crtc. Holds all modeset locks
 * while reading the atomic state.
 */
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	/* each mixer drives an equal horizontal slice of the display */
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	/* per-plane dump: stage, framebuffer layout, src/dst rects, pipes */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);

		for (i = 0; i < PIPES_PER_PLANE; i++) {
			if (!pstate->pipe[i].sspp)
				continue;
			seq_printf(s, "\tsspp[%d]:%s\n",
				   i, pstate->pipe[i].sspp->cap->name);
			seq_printf(s, "\tmultirect[%d]: mode: %d index: %d\n",
				   i, pstate->pipe[i].multirect_mode,
				   pstate->pipe[i].multirect_index);
		}

		seq_puts(s, "\n");
	}
	/* FPS estimate from vblank count since the last read of this file */
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
1798 
/* debugfs "state" file: dump the current perf/bandwidth state of the crtc */
static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %uk\n",
		   (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000));
	seq_printf(s, "max_per_pipe_ib: %u\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
1816 
/* create per-crtc debugfs files under the crtc's debugfs directory */
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* "status" is read-only; "state" is owner read/write */
	debugfs_create_file("status", 0400,
			crtc->debugfs_entry,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			crtc->debugfs_entry,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
1831 #else
/* debugfs disabled in this build: nothing to create */
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
1836 #endif /* CONFIG_DEBUG_FS */
1837 
/* drm_crtc_funcs::late_register - create the crtc's debugfs entries */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}
1842 
/* drm_crtc_funcs vtable wiring the DPU CRTC implementation into DRM core */
static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	/* software-state lifecycle (see dpu_crtc_reset and friends above) */
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.atomic_print_state = dpu_crtc_atomic_print_state,
	.late_register = dpu_crtc_late_register,
	/* CRC capture support for display validation */
	.verify_crc_source = dpu_crtc_verify_crc_source,
	.set_crc_source = dpu_crtc_set_crc_source,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};
1858 
/* atomic helper callbacks for check/commit phases of this crtc */
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.mode_valid = dpu_crtc_mode_valid,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};
1868 
1869 /**
1870  * dpu_crtc_init - create a new crtc object
1871  * @dev: dpu device
1872  * @plane: base plane
1873  * @cursor: cursor plane
1874  * @return: new crtc object or error
1875  *
1876  * initialize CRTC
1877  */
1878 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
1879 				struct drm_plane *cursor)
1880 {
1881 	struct msm_drm_private *priv = dev->dev_private;
1882 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
1883 	struct drm_crtc *crtc = NULL;
1884 	struct dpu_crtc *dpu_crtc;
1885 	int i, ret;
1886 
1887 	dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base,
1888 					       plane, cursor,
1889 					       &dpu_crtc_funcs,
1890 					       NULL);
1891 
1892 	if (IS_ERR(dpu_crtc))
1893 		return ERR_CAST(dpu_crtc);
1894 
1895 	crtc = &dpu_crtc->base;
1896 	crtc->dev = dev;
1897 
1898 	spin_lock_init(&dpu_crtc->spin_lock);
1899 	atomic_set(&dpu_crtc->frame_pending, 0);
1900 
1901 	init_completion(&dpu_crtc->frame_done_comp);
1902 
1903 	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
1904 
1905 	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
1906 		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
1907 		list_add(&dpu_crtc->frame_events[i].list,
1908 				&dpu_crtc->frame_event_list);
1909 		kthread_init_work(&dpu_crtc->frame_events[i].work,
1910 				dpu_crtc_frame_event_work);
1911 	}
1912 
1913 	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
1914 
1915 	if (dpu_kms->catalog->dspp_count) {
1916 		const struct dpu_dspp_cfg *dspp = &dpu_kms->catalog->dspp[0];
1917 
1918 		if (dspp->sblk->gc.base) {
1919 			drm_mode_crtc_set_gamma_size(crtc, DPU_GAMMA_LUT_SIZE);
1920 			drm_crtc_enable_color_mgmt(crtc, 0, true, DPU_GAMMA_LUT_SIZE);
1921 		} else {
1922 			drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
1923 		}
1924 	}
1925 
1926 	/* save user friendly CRTC name for later */
1927 	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
1928 
1929 	/* initialize event handling */
1930 	spin_lock_init(&dpu_crtc->event_lock);
1931 
1932 	ret = drm_self_refresh_helper_init(crtc);
1933 	if (ret) {
1934 		DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
1935 			crtc->name, ret);
1936 		return ERR_PTR(ret);
1937 	}
1938 
1939 	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
1940 	return crtc;
1941 }
1942