xref: /linux/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hwseq.h"
33 #include "dcn10/dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10/dcn10_optc.h"
38 #include "dcn10/dcn10_dpp.h"
39 #include "dcn10/dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10/dcn10_hubp.h"
46 #include "dcn10/dcn10_hubbub.h"
47 #include "dcn10/dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 #include "dc_state_priv.h"
60 
61 #define DC_LOGGER \
62 	dc_logger
63 #define DC_LOGGER_INIT(logger) \
64 	struct dal_logger *dc_logger = logger
65 
66 #define CTX \
67 	hws->ctx
68 #define REG(reg)\
69 	hws->regs->reg
70 
71 #undef FN
72 #define FN(reg_name, field_name) \
73 	hws->shifts->field_name, hws->masks->field_name
74 
75 /*print is 17 wide, first two characters are spaces*/
76 #define DTN_INFO_MICRO_SEC(ref_cycle) \
77 	print_microsec(dc_ctx, log_ctx, ref_cycle)
78 
79 #define GAMMA_HW_POINTS_NUM 256
80 
81 #define PGFSM_POWER_ON 0
82 #define PGFSM_POWER_OFF 2
83 
/*
 * Convert a DCHUB reference-clock cycle count into microseconds and log
 * it as "  %11d.%03d" (17 characters wide, first two characters spaces,
 * matching the DTN_INFO_MICRO_SEC column layout).
 *
 * NOTE(review): ref_cycle * 1000 is 32-bit arithmetic and could wrap for
 * very large cycle counts — confirm watermark values stay small enough.
 */
static void print_microsec(struct dc_context *dc_ctx,
			   struct dc_log_buffer_ctx *log_ctx,
			   uint32_t ref_cycle)
{
	static const unsigned int frac = 1000;
	const uint32_t ref_clk_mhz =
		dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO("  %11d.%03d", us_x10 / frac, us_x10 % frac);
}
96 
97 /*
98  * Delay until we passed busy-until-point to which we can
99  * do necessary locking/programming on consecutive full updates
100  */
/*
 * Delay until we passed busy-until-point to which we can
 * do necessary locking/programming on consecutive full updates.
 *
 * A prior unlock armed pipe_ctx->wait_is_required/next_vupdate/
 * wait_frame_count (see dcn10_set_wait_for_update_needed_for_pipe);
 * here we sleep until that vupdate window has passed so the next
 * programming cycle does not race the pending latch.
 *
 * @dc: dc instance
 * @pipe_ctx: pipe whose pending-update window we must respect
 * @is_surface_update_only: true when called from the (high-irql)
 *	surface-flip path; long sleeps are not allowed there, so the
 *	wait is deferred by keeping wait_is_required set.
 */
void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
{
	struct crtc_position position;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	unsigned int vpos, frame_count;
	uint32_t vupdate_start, vupdate_end, vblank_start;
	unsigned int lines_to_vupdate, us_to_vupdate;
	unsigned int us_per_line, us_vupdate;

	/* Need a stream, timing generator and stream encoder to wait on. */
	if (!stream ||
		!pipe_ctx->stream_res.tg ||
		!pipe_ctx->stream_res.stream_enc)
		return;

	/*
	 * Non-first ODM pipes share the master pipe's OTG; only wait on
	 * the master.  (The original also re-tested pipe_ctx->stream here,
	 * which is redundant after the NULL check above.)
	 */
	if (pipe_ctx->prev_odm_pipe)
		return;

	if (!pipe_ctx->wait_is_required)
		return;

	if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
						&vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	frame_count = tg->funcs->get_frame_count(tg);

	/* The frame we armed against has long passed; nothing to wait for.
	 * Unsigned subtraction handles frame-counter wraparound.
	 */
	if (frame_count - pipe_ctx->wait_frame_count > 2)
		return;

	vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;

	/* Lines remaining until vupdate, wrapping past v_total if needed. */
	if (vpos >= vupdate_start && vupdate_start >= vblank_start)
		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
	else
		lines_to_vupdate = vupdate_start - vpos;

	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;

	/* Already inside the vupdate window: no lead-in wait required. */
	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
		us_to_vupdate = 0;

	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;

	if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
		//surface updates come in at high irql
		pipe_ctx->wait_is_required = true;
		return;
	}

	fsleep(us_to_vupdate + us_vupdate);

	//clear
	pipe_ctx->next_vupdate = 0;
	pipe_ctx->wait_frame_count = 0;
	pipe_ctx->wait_is_required = false;
}
170 
171 /*
172  * On pipe unlock and programming, indicate pipe will be busy
173  * until some frame and line (vupdate), this is required for consecutive
174  * full updates, need to wait for updates
175  * to latch to try and program the next update
176  */
dcn10_set_wait_for_update_needed_for_pipe(struct dc * dc,struct pipe_ctx * pipe_ctx)177 void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
178 {
179 	uint32_t vupdate_start, vupdate_end;
180 	struct crtc_position position;
181 	unsigned int vpos, cur_frame;
182 
183 	if (!pipe_ctx->stream ||
184 		!pipe_ctx->stream_res.tg ||
185 		!pipe_ctx->stream_res.stream_enc)
186 		return;
187 
188 	dc->hwss.get_position(&pipe_ctx, 1, &position);
189 	vpos = position.vertical_count;
190 
191 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
192 						&vupdate_end);
193 
194 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
195 
196 	struct optc *optc1 = DCN10TG_FROM_TG(tg);
197 
198 	ASSERT(optc1->max_frame_count != 0);
199 
200 	if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
201 		return;
202 
203 	pipe_ctx->next_vupdate = vupdate_start;
204 
205 	cur_frame = tg->funcs->get_frame_count(tg);
206 
207 	if (vpos < vupdate_start) {
208 		pipe_ctx->wait_frame_count = cur_frame;
209 	} else {
210 		if (cur_frame + 1 > optc1->max_frame_count)
211 			pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
212 		else
213 			pipe_ctx->wait_frame_count = cur_frame + 1;
214 	}
215 
216 	pipe_ctx->wait_is_required = true;
217 }
218 
/*
 * Lock or unlock every active top pipe's timing generator.
 *
 * @dc: dc instance
 * @context: state whose pipe_ctx array is walked
 * @lock: true to lock, false to unlock
 *
 * Skips bottom (blended) pipes, pipes with no stream or plane on either
 * the new or current state, pipes whose TG is disabled, and SubVP
 * phantom pipes (their locking is managed by the phantom sequence).
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream ||
		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
		    !tg->funcs->is_tg_enabled(tg) ||
		    dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
			continue;

		/* Pass 'lock' straight through instead of branching on it. */
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
	}
}
250 
log_mpc_crc(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)251 static void log_mpc_crc(struct dc *dc,
252 	struct dc_log_buffer_ctx *log_ctx)
253 {
254 	struct dc_context *dc_ctx = dc->ctx;
255 	struct dce_hwseq *hws = dc->hwseq;
256 
257 	if (REG(MPC_CRC_RESULT_GB))
258 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
259 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
260 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
261 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
262 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
263 }
264 
dcn10_log_hubbub_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)265 static void dcn10_log_hubbub_state(struct dc *dc,
266 				   struct dc_log_buffer_ctx *log_ctx)
267 {
268 	struct dc_context *dc_ctx = dc->ctx;
269 	struct dcn_hubbub_wm wm;
270 	int i;
271 
272 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
273 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
274 
275 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
276 			"         sr_enter          sr_exit  dram_clk_change\n");
277 
278 	for (i = 0; i < 4; i++) {
279 		struct dcn_hubbub_wm_set *s;
280 
281 		s = &wm.sets[i];
282 		DTN_INFO("WM_Set[%d]:", s->wm_set);
283 		DTN_INFO_MICRO_SEC(s->data_urgent);
284 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
285 		DTN_INFO_MICRO_SEC(s->sr_enter);
286 		DTN_INFO_MICRO_SEC(s->sr_exit);
287 		DTN_INFO_MICRO_SEC(s->dram_clk_change);
288 		DTN_INFO("\n");
289 	}
290 
291 	DTN_INFO("\n");
292 }
293 
/*
 * Dump per-pipe HUBP (display front-end) state to the debug log in five
 * sections: surface configuration, 3DLUT fast-load (FL) registers, and
 * the RQ (request sizing), DLG (delay/prefetch) and TTU (urgency
 * watermark) register groups.  Blanked pipes are skipped in every
 * section.  Values come from the cached register snapshot refreshed by
 * hubp_read_state() in the first loop.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Latch live register values into the cached state struct. */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* 3DLUT fast-load register group (reuses snapshot read above). */
	DTN_INFO("\n=======HUBP FL======\n");
	DTN_INFO(
		"HUBP FL:  Enabled  Done  adr_mode  width  tmz  xbar_sel_R  xbar_sel_G  xbar_sel_B  adr_hi  adr_low  REFCYC  Bias   Scale       Mode      Format\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct dcn_fl_regs_st *fl_regs = &s->fl_regs;

		if (!s->blank_en) {
			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %8xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh %5x %5x %5x",
					pool->hubps[i]->inst,
					fl_regs->lut_enable,
					fl_regs->lut_done,
					fl_regs->lut_addr_mode,
					fl_regs->lut_width,
					fl_regs->lut_tmz,
					fl_regs->lut_crossbar_sel_r,
					fl_regs->lut_crossbar_sel_g,
					fl_regs->lut_crossbar_sel_b,
					fl_regs->lut_addr_hi,
					fl_regs->lut_addr_lo,
					fl_regs->refcyc_3dlut_group,
					fl_regs->lut_fl_bias,
					fl_regs->lut_fl_scale,
					fl_regs->lut_fl_mode,
					fl_regs->lut_fl_format);
			DTN_INFO("\n");
		}
	}

	/* RQ: memory request expansion/chunk sizing, luma (L) + chroma (C). */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* DLG: per-line delivery and prefetch timing registers. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
			"  x_rp_dlay  x_rr_sfl  rc_td_grp\n");

	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh %xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group);
	}

	/* TTU: QoS watermarks and per-request delivery rates. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
437 
/*
 * Dump DPP color-pipeline state (input/degamma/regamma LUT modes and
 * gamut-remap matrix), DPP and MPC color capabilities, MPCC blending
 * tree state, and the MPC RMCM 3DLUT/shaper registers to the debug log.
 * Used as the DCN1 fallback when dc->hwss.log_color_state is not set.
 */
static void dcn10_log_color_state(struct dc *dc,
				  struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	bool is_gamut_remap_available = false;
	int i;

	DTN_INFO("DPP:    IGAM format    IGAM mode    DGAM mode    RGAM mode"
		 "  GAMUT adjust  "
		 "C11        C12        C13        C14        "
		 "C21        C22        C23        C24        "
		 "C31        C32        C33        C34        \n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);
		/*
		 * NOTE(review): once set, this flag is never cleared for
		 * later iterations; if a subsequent DPP lacked the hook its
		 * zeroed gamut_remap would still be printed — confirm all
		 * DPPs in a pool share the same funcs table.
		 */
		if (dpp->funcs->dpp_get_gamut_remap) {
			dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
			is_gamut_remap_available = true;
		}

		if (!s.is_enabled)
			continue;

		/* Decode the LUT mode enums into the names used by the log. */
		DTN_INFO("[%2d]:  %11xh  %11s    %9s    %9s",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))));
		if (is_gamut_remap_available)
			DTN_INFO("  %12s  "
				 "%010lld %010lld %010lld %010lld "
				 "%010lld %010lld %010lld %010lld "
				 "%010lld %010lld %010lld %010lld",
				 (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
					((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
				 s.gamut_remap.temperature_matrix[0].value,
				 s.gamut_remap.temperature_matrix[1].value,
				 s.gamut_remap.temperature_matrix[2].value,
				 s.gamut_remap.temperature_matrix[3].value,
				 s.gamut_remap.temperature_matrix[4].value,
				 s.gamut_remap.temperature_matrix[5].value,
				 s.gamut_remap.temperature_matrix[6].value,
				 s.gamut_remap.temperature_matrix[7].value,
				 s.gamut_remap.temperature_matrix[8].value,
				 s.gamut_remap.temperature_matrix[9].value,
				 s.gamut_remap.temperature_matrix[10].value,
				 s.gamut_remap.temperature_matrix[11].value);

		DTN_INFO("\n");
	}
	DTN_INFO("\n");
	DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d"
		 "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
		 "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d"
		 "  blnd_lut:%d  oscs:%d\n\n",
		 dc->caps.color.dpp.input_lut_shared,
		 dc->caps.color.dpp.icsc,
		 dc->caps.color.dpp.dgam_ram,
		 dc->caps.color.dpp.dgam_rom_caps.srgb,
		 dc->caps.color.dpp.dgam_rom_caps.bt2020,
		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
		 dc->caps.color.dpp.dgam_rom_caps.pq,
		 dc->caps.color.dpp.dgam_rom_caps.hlg,
		 dc->caps.color.dpp.post_csc,
		 dc->caps.color.dpp.gamma_corr,
		 dc->caps.color.dpp.dgam_rom_for_yuv,
		 dc->caps.color.dpp.hw_3d_lut,
		 dc->caps.color.dpp.ogam_ram,
		 dc->caps.color.dpp.ocsc);

	/* MPCC blend-tree state; opp_id == 0xf marks an unused MPCC. */
	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
	for (i = 0; i < pool->mpcc_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");
	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
		 dc->caps.color.mpc.gamut_remap,
		 dc->caps.color.mpc.num_3dluts,
		 dc->caps.color.mpc.ogam_ram,
		 dc->caps.color.mpc.ocsc);
	/* RMCM 3DLUT registers, one row per in-use MPCC. */
	DTN_INFO("===== MPC RMCM 3DLUT =====\n");
	DTN_INFO("MPCC:  SIZE  MODE  MODE_CUR  RD_SEL  30BIT_EN  WR_EN_MASK  RAM_SEL  OUT_NORM_FACTOR	FL_SEL	OUT_OFFSET	OUT_SCALE	FL_DONE	SOFT_UNDERFLOW	HARD_UNDERFLOW MEM_PWR_ST	FORCE	DIS	MODE\n");
	for (i = 0; i < pool->mpcc_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %4xh  %4xh  %6xh  %4x  %4x  %4x  %4x  %4x %4xh  %4xh  %6xh  %4x  %4x  %4x  %4x  %4x  %4x  %4x\n",
				i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
				s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
				s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
				s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
				s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
				s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
	}
	DTN_INFO("\n");
	/* RMCM shaper LUT registers, one row per in-use MPCC. */
	DTN_INFO("===== MPC RMCM Shaper =====\n");
	DTN_INFO("MPCC:  CNTL  LUT_MODE  MODE_CUR  WR_EN_MASK  WR_SEL  OFFSET  SCALE  START_B	START_SEG_B	END_B	END_BASE_B	MEM_PWR_ST	FORCE	DIS	MODE\n");
	for (i = 0; i < pool->mpcc_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %4xh  %4xh  %6xh  %4x  %4x  %4x  %4x  %4x %4xh  %4xh  %6xh  %4x  %4x  %4x  %4x\n",
				i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur,
				s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b,
				s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b,
				s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state,
				s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode);
	}
}
574 
/*
 * Top-level hardware-state dump entry point: logs HUBBUB watermarks,
 * HUBP state, color pipeline state, OTG timing, DSC, stream/link
 * encoder, HPO DP encoder state, calculated clocks, and CRC registers
 * between DTN_INFO_BEGIN()/DTN_INFO_END() markers.
 */
void dcn10_log_hw_state(struct dc *dc,
			struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* Prefer the ASIC-specific color logger when one is installed. */
	if (dc->hwss.log_color_state)
		dc->hwss.log_color_state(dc, log_ctx);
	else
		dcn10_log_color_state(dc, log_ctx);

	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		if (tg->funcs->read_otg_state)
			tg->funcs->read_otg_state(tg, &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* Stream encoder state (only encoders that implement the hook). */
	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	/* Link encoder DPHY/FEC state; link_enc may be NULL (DPIA etc.). */
	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	/* HPO (128b/132b DP) stream and link encoder sections. */
	{
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
777 
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)778 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
779 {
780 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
781 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
782 
783 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
784 		tg->funcs->clear_optc_underflow(tg);
785 		return true;
786 	}
787 
788 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
789 		hubp->funcs->hubp_clear_underflow(hubp);
790 		return true;
791 	}
792 	return false;
793 }
794 
/*
 * Enable or disable plane power gating. When gating is disabled, all HUBP
 * and DPP power domains are forced on; when enabled, the force is released
 * so the PG FSM may gate them.
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	/* force_on == true keeps domains powered, i.e. power gating disabled */
	bool force_on = !enable;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
816 
dcn10_disable_vga(struct dce_hwseq * hws)817 void dcn10_disable_vga(
818 	struct dce_hwseq *hws)
819 {
820 	unsigned int in_vga1_mode = 0;
821 	unsigned int in_vga2_mode = 0;
822 	unsigned int in_vga3_mode = 0;
823 	unsigned int in_vga4_mode = 0;
824 
825 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
826 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
827 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
828 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
829 
830 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
831 			in_vga3_mode == 0 && in_vga4_mode == 0)
832 		return;
833 
834 	REG_WRITE(D1VGA_CONTROL, 0);
835 	REG_WRITE(D2VGA_CONTROL, 0);
836 	REG_WRITE(D3VGA_CONTROL, 0);
837 	REG_WRITE(D4VGA_CONTROL, 0);
838 
839 	/* HW Engineer's Notes:
840 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
841 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
842 	 *
843 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
844 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
845 	 */
846 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
847 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
848 }
849 
/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP on (gate released), false to power it
 *            off (gate asserted). Note: power_on == true translates to
 *            POWER_GATE = 0 below.
 *
 * Enable or disable power gate in the specific DPP instance, then poll
 * the PG FSM status until the domain reaches the requested power state.
 */
void dcn10_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	/* POWER_GATE is active-high: 1 gates (powers off), 0 ungates. */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	/* Honor the debug override that disables DPP power gating entirely. */
	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	/* No PG registers on this variant; nothing to program. */
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	/* DPP instances map to the odd-numbered power domains. */
	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
910 
/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP on (gate released), false to power it
 *            off (gate asserted). Note: power_on == true translates to
 *            POWER_GATE = 0 below.
 *
 * Enable or disable power gate in the specific HUBP instance, then poll
 * the PG FSM status until the domain reaches the requested power state.
 */
void dcn10_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	/* POWER_GATE is active-high: 1 gates (powers off), 0 ungates. */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	/* Honor the debug override that disables HUBP power gating entirely. */
	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	/* No PG registers on this variant; nothing to program. */
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	/* HUBP instances map to the even-numbered power domains. */
	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
971 
/* Power on (un-gate) the front-end resources (DPP + HUBP) for @plane_id.
 *
 * Power-gate requests are normally blocked; this briefly raises
 * IP_REQUEST_EN, powers the DPP and HUBP domains on, then blocks
 * requests again.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	/* Enable the DPP root clock first, where the hook exists. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Temporarily allow power-gate state changes. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		/* Block further power-gate requests. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
997 
/* Undo the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power-gate it. No-op if the workaround was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* Allow power-gate requests while gating HUBP0 back off. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
1017 
/* Apply the DEGVIDCN10_253 workaround: when every HUBP is power gated,
 * power HUBP0 back on (kept blanked off) so stutter can be enabled.
 * Skipped when stutter is disabled or the workaround flag is not set.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Only applies once every pipe's HUBP is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
1047 
/* Run the VBIOS golden init sequence for DCN: global init power gating
 * plus per-pipe disable, with a workaround to restore the self-refresh
 * force-enable state the command table may have flipped.
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	/* Platform hook may handle the whole init (e.g. after S0i3). */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* Snapshot self-refresh state before the command table runs. */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	/* Restore the pre-command-table state if the table flipped it on. */
	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
1087 
/* Workaround for a false OPTC underflow indication: wait for MPCC
 * disconnects on all pipes of @stream, then clear any underflow status
 * that appeared during the wait (but preserve one that was already set
 * before, so a real underflow is not hidden).
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* Remember whether underflow was already asserted beforehand. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* Clear only an underflow newly raised during this sequence. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
1116 
calculate_vready_offset_for_group(struct pipe_ctx * pipe)1117 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
1118 {
1119 	struct pipe_ctx *other_pipe;
1120 	int vready_offset = pipe->pipe_dlg_param.vready_offset;
1121 
1122 	/* Always use the largest vready_offset of all connected pipes */
1123 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
1124 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
1125 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
1126 	}
1127 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
1128 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
1129 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
1130 	}
1131 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
1132 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
1133 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
1134 	}
1135 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
1136 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
1137 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
1138 	}
1139 
1140 	return vready_offset;
1141 }
1142 
/* Bring up the OTG timing for @pipe_ctx: enable the OPTC clock, program
 * the pixel clock PLL, program the timing generator, set the blank color,
 * and finally enable the CRTC. Only the top (parent) pipe programs the
 * back end; children share it.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if the pixel clock or
 * CRTC enable fails.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* By the upper caller loop, pipe0 is the parent pipe and is called
	 * first. The back end is set up for pipe0; other children pipes
	 * share the back end with pipe 0, so no programming is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Track SYMCLK reference state for HDMI TMDS links. */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->pipe_dlg_param.pstate_keepout,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling the CRTC, and run the false
	 * underflow workaround once blanking has completed.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1245 
/* Tear down the back end for @pipe_ctx: turn DPMS off (or at least the
 * audio stream), release the audio endpoint, and — for the parent pipe
 * only — disable the CRTC and OPTC clock. Finally detaches the stream
 * from the pipe if the pipe belongs to the current state.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder means there is no back end to reset. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
		/* Drop the OTG SYMCLK reference taken at enable time. */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe_ctx belongs to the
	 * current state.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1313 
dcn10_hw_wa_force_recovery(struct dc * dc)1314 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1315 {
1316 	struct hubp *hubp ;
1317 	unsigned int i;
1318 
1319 	if (!dc->debug.recovery_enabled)
1320 		return false;
1321 	/*
1322 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1323 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1324 	DCHUBP_CNTL:HUBP_DISABLE=1
1325 	DCHUBP_CNTL:HUBP_DISABLE=0
1326 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1327 	DCSURF_PRIMARY_SURFACE_ADDRESS
1328 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1329 	*/
1330 
1331 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1332 		struct pipe_ctx *pipe_ctx =
1333 			&dc->current_state->res_ctx.pipe_ctx[i];
1334 		if (pipe_ctx != NULL) {
1335 			hubp = pipe_ctx->plane_res.hubp;
1336 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1337 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1338 				hubp->funcs->set_hubp_blank_en(hubp, true);
1339 		}
1340 	}
1341 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1342 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1343 
1344 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1345 		struct pipe_ctx *pipe_ctx =
1346 			&dc->current_state->res_ctx.pipe_ctx[i];
1347 		if (pipe_ctx != NULL) {
1348 			hubp = pipe_ctx->plane_res.hubp;
1349 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1350 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1351 				hubp->funcs->hubp_disable_control(hubp, true);
1352 		}
1353 	}
1354 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1355 		struct pipe_ctx *pipe_ctx =
1356 			&dc->current_state->res_ctx.pipe_ctx[i];
1357 		if (pipe_ctx != NULL) {
1358 			hubp = pipe_ctx->plane_res.hubp;
1359 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1360 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1361 				hubp->funcs->hubp_disable_control(hubp, true);
1362 		}
1363 	}
1364 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1365 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1366 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1367 		struct pipe_ctx *pipe_ctx =
1368 			&dc->current_state->res_ctx.pipe_ctx[i];
1369 		if (pipe_ctx != NULL) {
1370 			hubp = pipe_ctx->plane_res.hubp;
1371 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1372 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1373 				hubp->funcs->set_hubp_blank_en(hubp, true);
1374 		}
1375 	}
1376 	return true;
1377 
1378 }
1379 
/* Verify the HUBBUB reports allow_pstate_change high; on failure, break
 * to the debugger, attempt the force-recovery workaround, and re-check.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1403 
/* trigger HW to start disconnect plane from stream on the next vsync:
 * remove this pipe's MPCC from the OPP's MPC tree and mark the
 * disconnect as pending (except for SubVP phantom pipes, whose OTG is
 * off so MPCC_STATUS would never report idle).
 */
void dcn10_plane_atomic_disconnect(struct dc *dc,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1438 
/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Temporarily allow power-gate state changes. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		/* Reset the blocks while their domains are gated off. */
		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		/* Block further power-gate requests. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	/* Gate the DPP root clock last, where the hook exists. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
1481 
/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* MPCC disconnect must have completed before clocks can be gated. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only if no MPCC is still attached to it. */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Detach the plane/stream resources from this pipe context. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1517 
dcn10_disable_plane(struct dc * dc,struct dc_state * state,struct pipe_ctx * pipe_ctx)1518 void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
1519 {
1520 	struct dce_hwseq *hws = dc->hwseq;
1521 	DC_LOGGER_INIT(dc->ctx->logger);
1522 
1523 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1524 		return;
1525 
1526 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1527 
1528 	apply_DEGVIDCN10_253_wa(dc);
1529 
1530 	DC_LOG_DC("Power down front end %d\n",
1531 					pipe_ctx->pipe_idx);
1532 }
1533 
dcn10_init_pipes(struct dc * dc,struct dc_state * context)1534 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1535 {
1536 	int i;
1537 	struct dce_hwseq *hws = dc->hwseq;
1538 	struct hubbub *hubbub = dc->res_pool->hubbub;
1539 	bool can_apply_seamless_boot = false;
1540 	bool tg_enabled[MAX_PIPES] = {false};
1541 
1542 	for (i = 0; i < context->stream_count; i++) {
1543 		if (context->streams[i]->apply_seamless_boot_optimization) {
1544 			can_apply_seamless_boot = true;
1545 			break;
1546 		}
1547 	}
1548 
1549 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1550 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1551 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1552 
1553 		/* There is assumption that pipe_ctx is not mapping irregularly
1554 		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1555 		 * we will use the pipe, so don't disable
1556 		 */
1557 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1558 			continue;
1559 
1560 		/* Blank controller using driver code instead of
1561 		 * command table.
1562 		 */
1563 		if (tg->funcs->is_tg_enabled(tg)) {
1564 			if (hws->funcs.init_blank != NULL) {
1565 				hws->funcs.init_blank(dc, tg);
1566 				tg->funcs->lock(tg);
1567 			} else {
1568 				tg->funcs->lock(tg);
1569 				tg->funcs->set_blank(tg, true);
1570 				hwss_wait_for_blank_complete(tg);
1571 			}
1572 		}
1573 	}
1574 
1575 	/* Reset det size */
1576 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1577 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1578 		struct hubp *hubp = dc->res_pool->hubps[i];
1579 
1580 		/* Do not need to reset for seamless boot */
1581 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1582 			continue;
1583 
1584 		if (hubbub && hubp) {
1585 			if (hubbub->funcs->program_det_size)
1586 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
1587 			if (hubbub->funcs->program_det_segments)
1588 				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
1589 		}
1590 	}
1591 
1592 	/* num_opp will be equal to number of mpcc */
1593 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1594 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1595 
1596 		/* Cannot reset the MPC mux if seamless boot */
1597 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1598 			continue;
1599 
1600 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1601 				dc->res_pool->mpc, i);
1602 	}
1603 
1604 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1605 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1606 		struct hubp *hubp = dc->res_pool->hubps[i];
1607 		struct dpp *dpp = dc->res_pool->dpps[i];
1608 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1609 
1610 		/* There is assumption that pipe_ctx is not mapping irregularly
1611 		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1612 		 * we will use the pipe, so don't disable
1613 		 */
1614 		if (can_apply_seamless_boot &&
1615 			pipe_ctx->stream != NULL &&
1616 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1617 				pipe_ctx->stream_res.tg)) {
1618 			// Enable double buffering for OTG_BLANK no matter if
1619 			// seamless boot is enabled or not to suppress global sync
1620 			// signals when OTG blanked. This is to prevent pipe from
1621 			// requesting data while in PSR.
1622 			tg->funcs->tg_init(tg);
1623 			hubp->power_gated = true;
1624 			tg_enabled[i] = true;
1625 			continue;
1626 		}
1627 
1628 		/* Disable on the current state so the new one isn't cleared. */
1629 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1630 
1631 		hubp->funcs->hubp_reset(hubp);
1632 		dpp->funcs->dpp_reset(dpp);
1633 
1634 		pipe_ctx->stream_res.tg = tg;
1635 		pipe_ctx->pipe_idx = i;
1636 
1637 		pipe_ctx->plane_res.hubp = hubp;
1638 		pipe_ctx->plane_res.dpp = dpp;
1639 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1640 		hubp->mpcc_id = dpp->inst;
1641 		hubp->opp_id = OPP_ID_INVALID;
1642 		hubp->power_gated = false;
1643 
1644 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1645 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1646 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1647 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1648 
1649 		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
1650 
1651 		if (tg->funcs->is_tg_enabled(tg))
1652 			tg->funcs->unlock(tg);
1653 
1654 		dc->hwss.disable_plane(dc, context, pipe_ctx);
1655 
1656 		pipe_ctx->stream_res.tg = NULL;
1657 		pipe_ctx->plane_res.hubp = NULL;
1658 
1659 		if (tg->funcs->is_tg_enabled(tg)) {
1660 			if (tg->funcs->init_odm)
1661 				tg->funcs->init_odm(tg);
1662 		}
1663 
1664 		tg->funcs->tg_init(tg);
1665 	}
1666 
1667 	/* Clean up MPC tree */
1668 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1669 		if (tg_enabled[i]) {
1670 			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
1671 				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
1672 					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
1673 
1674 					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
1675 						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1676 				}
1677 			}
1678 		}
1679 	}
1680 
1681 	/* Power gate DSCs */
1682 	if (hws->funcs.dsc_pg_control != NULL) {
1683 		uint32_t num_opps = 0;
1684 		uint32_t opp_id_src0 = OPP_ID_INVALID;
1685 		uint32_t opp_id_src1 = OPP_ID_INVALID;
1686 
1687 		// Step 1: To find out which OPTC is running & OPTC DSC is ON
1688 		// We can't use res_pool->res_cap->num_timing_generator to check
1689 		// Because it records display pipes default setting built in driver,
1690 		// not display pipes of the current chip.
1691 		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
1693 		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1694 			uint32_t optc_dsc_state = 0;
1695 			struct timing_generator *tg = dc->res_pool->timing_generators[i];
1696 
1697 			if (tg->funcs->is_tg_enabled(tg)) {
1698 				if (tg->funcs->get_dsc_status)
1699 					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
1700 				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
1701 				// non-zero value is DSC enabled
1702 				if (optc_dsc_state != 0) {
1703 					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1704 					break;
1705 				}
1706 			}
1707 		}
1708 
1709 		// Step 2: To power down DSC but skip DSC  of running OPTC
1710 		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1711 			struct dcn_dsc_state s  = {0};
1712 
1713 			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
1714 
1715 			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
1716 				s.dsc_clock_en && s.dsc_fw_en)
1717 				continue;
1718 
1719 			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
1720 		}
1721 	}
1722 }
1723 
dcn10_init_hw(struct dc * dc)1724 void dcn10_init_hw(struct dc *dc)
1725 {
1726 	int i;
1727 	struct abm *abm = dc->res_pool->abm;
1728 	struct dmcu *dmcu = dc->res_pool->dmcu;
1729 	struct dce_hwseq *hws = dc->hwseq;
1730 	struct dc_bios *dcb = dc->ctx->dc_bios;
1731 	struct resource_pool *res_pool = dc->res_pool;
1732 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1733 	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
1734 	bool   is_optimized_init_done = false;
1735 
1736 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1737 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1738 
1739 	/* Align bw context with hw config when system resume. */
1740 	if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
1741 		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
1742 		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
1743 	}
1744 
1745 	// Initialize the dccg
1746 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1747 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1748 
1749 	if (!dcb->funcs->is_accelerated_mode(dcb))
1750 		hws->funcs.disable_vga(dc->hwseq);
1751 
1752 	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
1753 		hws->funcs.bios_golden_init(dc);
1754 
1755 
1756 	if (dc->ctx->dc_bios->fw_info_valid) {
1757 		res_pool->ref_clocks.xtalin_clock_inKhz =
1758 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1759 
1760 		if (res_pool->dccg && res_pool->hubbub) {
1761 
1762 			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1763 					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1764 					&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1765 
1766 			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1767 					res_pool->ref_clocks.dccg_ref_clock_inKhz,
1768 					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1769 		} else {
1770 			// Not all ASICs have DCCG sw component
1771 			res_pool->ref_clocks.dccg_ref_clock_inKhz =
1772 					res_pool->ref_clocks.xtalin_clock_inKhz;
1773 			res_pool->ref_clocks.dchub_ref_clock_inKhz =
1774 					res_pool->ref_clocks.xtalin_clock_inKhz;
1775 		}
1776 	} else
1777 		ASSERT_CRITICAL(false);
1778 
1779 	for (i = 0; i < dc->link_count; i++) {
1780 		/* Power up AND update implementation according to the
1781 		 * required signal (which may be different from the
1782 		 * default signal on connector).
1783 		 */
1784 		struct dc_link *link = dc->links[i];
1785 
1786 		if (!is_optimized_init_done)
1787 			link->link_enc->funcs->hw_init(link->link_enc);
1788 
1789 		/* Check for enabled DIG to identify enabled display */
1790 		if (link->link_enc->funcs->is_dig_enabled &&
1791 			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1792 			link->link_status.link_active = true;
1793 			if (link->link_enc->funcs->fec_is_active &&
1794 					link->link_enc->funcs->fec_is_active(link->link_enc))
1795 				link->fec_state = dc_link_fec_enabled;
1796 		}
1797 	}
1798 
1799 	/* we want to turn off all dp displays before doing detection */
1800 	dc->link_srv->blank_all_dp_displays(dc);
1801 
1802 	if (hws->funcs.enable_power_gating_plane)
1803 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1804 
1805 	/* If taking control over from VBIOS, we may want to optimize our first
1806 	 * mode set, so we need to skip powering down pipes until we know which
1807 	 * pipes we want to use.
1808 	 * Otherwise, if taking control is not possible, we need to power
1809 	 * everything down.
1810 	 */
1811 	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
1812 		if (!is_optimized_init_done) {
1813 			hws->funcs.init_pipes(dc, dc->current_state);
1814 			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1815 				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1816 						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1817 		}
1818 	}
1819 
1820 	if (!is_optimized_init_done) {
1821 
1822 		for (i = 0; i < res_pool->audio_count; i++) {
1823 			struct audio *audio = res_pool->audios[i];
1824 
1825 			audio->funcs->hw_init(audio);
1826 		}
1827 
1828 		for (i = 0; i < dc->link_count; i++) {
1829 			struct dc_link *link = dc->links[i];
1830 
1831 			if (link->panel_cntl) {
1832 				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1833 				user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
1834 			}
1835 		}
1836 
1837 		if (abm != NULL)
1838 			abm->funcs->abm_init(abm, backlight, user_level);
1839 
1840 		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1841 			dmcu->funcs->dmcu_init(dmcu);
1842 	}
1843 
1844 	if (abm != NULL && dmcu != NULL)
1845 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1846 
1847 	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1848 	if (!is_optimized_init_done)
1849 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1850 
1851 	if (!dc->debug.disable_clock_gate) {
1852 		/* enable all DCN clock gating */
1853 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1854 
1855 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1856 
1857 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1858 	}
1859 
1860 	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
1861 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1862 }
1863 
1864 /* In headless boot cases, DIG may be turned
1865  * on which causes HW/SW discrepancies.
1866  * To avoid this, power down hardware on boot
1867  * if DIG is turned on
1868  */
dcn10_power_down_on_boot(struct dc * dc)1869 void dcn10_power_down_on_boot(struct dc *dc)
1870 {
1871 	struct dc_link *edp_links[MAX_NUM_EDP];
1872 	struct dc_link *edp_link = NULL;
1873 	int edp_num;
1874 	int i = 0;
1875 
1876 	dc_get_edp_links(dc, edp_links, &edp_num);
1877 	if (edp_num)
1878 		edp_link = edp_links[0];
1879 
1880 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1881 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1882 			dc->hwseq->funcs.edp_backlight_control &&
1883 			dc->hwseq->funcs.power_down &&
1884 			dc->hwss.edp_power_control) {
1885 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1886 		dc->hwseq->funcs.power_down(dc);
1887 		dc->hwss.edp_power_control(edp_link, false);
1888 	} else {
1889 		for (i = 0; i < dc->link_count; i++) {
1890 			struct dc_link *link = dc->links[i];
1891 
1892 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1893 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1894 					dc->hwseq->funcs.power_down) {
1895 				dc->hwseq->funcs.power_down(dc);
1896 				break;
1897 			}
1898 
1899 		}
1900 	}
1901 
1902 	/*
1903 	 * Call update_clocks with empty context
1904 	 * to send DISPLAY_OFF
1905 	 * Otherwise DISPLAY_OFF may not be asserted
1906 	 */
1907 	if (dc->clk_mgr->funcs->set_low_power_state)
1908 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1909 }
1910 
/*
 * dcn10_reset_hw_ctx_wrap - Tear down back ends that are removed or need
 * reprogramming in the transition from the current state to @context.
 *
 * Walks pipes in reverse index order; for each top pipe that carried a
 * stream in the outgoing state but has no stream (or a reprogram-worthy
 * change) in the new one, it resets the back end, re-enables stream
 * gating and powers down the pipe's old clock source.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Only pipes that had a stream in the outgoing state matter. */
		if (!pipe_ctx_old->stream)
			continue;

		/* Secondary pipes are reset through their top pipe. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1942 
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1943 static bool patch_address_for_sbs_tb_stereo(
1944 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1945 {
1946 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1947 	bool sec_split = pipe_ctx->top_pipe &&
1948 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1949 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1950 		(pipe_ctx->stream->timing.timing_3d_format ==
1951 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1952 		 pipe_ctx->stream->timing.timing_3d_format ==
1953 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1954 		*addr = plane_state->address.grph_stereo.left_addr;
1955 		plane_state->address.grph_stereo.left_addr =
1956 		plane_state->address.grph_stereo.right_addr;
1957 		return true;
1958 	} else {
1959 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1960 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1961 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1962 			plane_state->address.grph_stereo.right_addr =
1963 			plane_state->address.grph_stereo.left_addr;
1964 			plane_state->address.grph_stereo.right_meta_addr =
1965 			plane_state->address.grph_stereo.left_meta_addr;
1966 		}
1967 	}
1968 	return false;
1969 }
1970 
dcn10_update_plane_addr(const struct dc * dc,struct pipe_ctx * pipe_ctx)1971 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1972 {
1973 	bool addr_patched = false;
1974 	PHYSICAL_ADDRESS_LOC addr;
1975 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1976 
1977 	if (plane_state == NULL)
1978 		return;
1979 
1980 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1981 
1982 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1983 			pipe_ctx->plane_res.hubp,
1984 			&plane_state->address,
1985 			plane_state->flip_immediate);
1986 
1987 	plane_state->status.requested_address = plane_state->address;
1988 
1989 	if (plane_state->flip_immediate)
1990 		plane_state->status.current_address = plane_state->address;
1991 
1992 	if (addr_patched)
1993 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1994 }
1995 
dcn10_set_input_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)1996 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1997 			const struct dc_plane_state *plane_state)
1998 {
1999 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
2000 	const struct dc_transfer_func *tf = NULL;
2001 	bool result = true;
2002 
2003 	if (dpp_base == NULL)
2004 		return false;
2005 
2006 	tf = &plane_state->in_transfer_func;
2007 
2008 	if (!dpp_base->ctx->dc->debug.always_use_regamma
2009 		&& !plane_state->gamma_correction.is_identity
2010 			&& dce_use_lut(plane_state->format))
2011 		dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);
2012 
2013 	if (tf->type == TF_TYPE_PREDEFINED) {
2014 		switch (tf->tf) {
2015 		case TRANSFER_FUNCTION_SRGB:
2016 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
2017 			break;
2018 		case TRANSFER_FUNCTION_BT709:
2019 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
2020 			break;
2021 		case TRANSFER_FUNCTION_LINEAR:
2022 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
2023 			break;
2024 		case TRANSFER_FUNCTION_PQ:
2025 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
2026 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
2027 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
2028 			result = true;
2029 			break;
2030 		default:
2031 			result = false;
2032 			break;
2033 		}
2034 	} else if (tf->type == TF_TYPE_BYPASS) {
2035 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
2036 	} else {
2037 		cm_helper_translate_curve_to_degamma_hw_format(tf,
2038 					&dpp_base->degamma_params);
2039 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
2040 				&dpp_base->degamma_params);
2041 		result = true;
2042 	}
2043 
2044 	return result;
2045 }
2046 
2047 #define MAX_NUM_HW_POINTS 0x200
2048 
/*
 * log_tf - Dump a transfer function's red/green/blue points to the log.
 *
 * Verbosity tiers:
 *  - DC_LOG_GAMMA: red channel of the hw points only;
 *  - DC_LOG_ALL_GAMMA: red channel of every point, hw or not;
 *  - DC_LOG_ALL_TF_CHANNELS: green and blue channels as well.
 */
static void log_tf(struct dc_context *ctx,
				const struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	int idx;

	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Points actually consumed by hardware. */
	for (idx = 0; idx < hw_points_num; idx++) {
		DC_LOG_GAMMA("R\t%d\t%llu", idx, tf->tf_pts.red[idx].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", idx, tf->tf_pts.green[idx].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", idx, tf->tf_pts.blue[idx].value);
	}

	/* Remaining points beyond what hardware uses. */
	for (idx = hw_points_num; idx < MAX_NUM_HW_POINTS; idx++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", idx, tf->tf_pts.red[idx].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", idx, tf->tf_pts.green[idx].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", idx, tf->tf_pts.blue[idx].value);
	}
}
2073 
/*
 * dcn10_set_output_transfer_func - Program the DPP regamma block from the
 * stream's output transfer function.
 *
 * Uses the fixed HW sRGB curve when the stream asks for predefined sRGB;
 * otherwise translates the curve into a user PWL (expensive, see comment
 * below), falling back to regamma bypass when translation fails.
 *
 * Return: false when @stream or the pipe's DPP is missing, true otherwise.
 */
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	if (!stream)
		return false;

	if (dpp == NULL)
		return false;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	/* Predefined sRGB maps directly onto the HW regamma curve. */
	if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
	    stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 * update.
	 */
	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
			&stream->out_transfer_func,
			&dpp->regamma_params, false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				dpp,
				&dpp->regamma_params, OPP_REGAMMA_USER);
	} else
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	/* Log whatever was programmed, for gamma debugging. */
	if (stream->ctx) {
		log_tf(stream->ctx,
				&stream->out_transfer_func,
				dpp->regamma_params.hw_points_num);
	}

	return true;
}
2111 
/*
 * dcn10_pipe_control_lock - Take or release the TG master update lock for
 * the pipe's timing generator.
 *
 * The master update lock covers everything on the TG, so only the top pipe
 * of a tree needs to (un)lock; secondary pipes are ignored.  Optional
 * sanity checks verify pstate-change allowance before and after.
 */
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct timing_generator *tg;

	/* Only the top pipe holds the TG master update lock. */
	if (!pipe || pipe->top_pipe)
		return;

	tg = pipe->stream_res.tg;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (lock)
		tg->funcs->lock(tg);
	else
		tg->funcs->unlock(tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
2136 
2137 /**
2138  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
2139  *
2140  * Software keepout workaround to prevent cursor update locking from stalling
2141  * out cursor updates indefinitely or from old values from being retained in
2142  * the case where the viewport changes in the same frame as the cursor.
2143  *
2144  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
2145  * too close to VUPDATE, then stall out until VUPDATE finishes.
2146  *
2147  * TODO: Optimize cursor programming to be once per frame before VUPDATE
2148  *       to avoid the need for this workaround.
2149  *
2150  * @dc: Current DC state
2151  * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
2152  *
2153  * Return: void
2154  */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Need both hooks to compute the VUPDATE window; otherwise skip. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
				       &vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else {
		/* VPOS is past VUPDATE start: wrap to next frame's VUPDATE. */
		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
	}

	/* Calculate time until VUPDATE in microseconds. */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;

	/* Position is in the range of vupdate start and end*/
	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
		us_to_vupdate = 0;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Too close to VUPDATE: busy-wait through the remaining time plus
	 * the whole VUPDATE window before allowing the lock. */
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
2202 
/*
 * dcn10_cursor_lock - Lock/unlock cursor updates for a stream's MPCC tree.
 *
 * Only the top pipe needs the lock (it is per MPCC tree).  Taking the lock
 * is delayed around VUPDATE to avoid stalling cursor updates.  The lock is
 * routed through DMUB when the link requires it, otherwise through the MPC.
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	bool use_dmub;

	/* cursor lock is per MPCC tree: only the top pipe takes it. */
	if (!pipe || pipe->top_pipe)
		return;

	/* Prevent cursor lock from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	use_dmub = pipe->stream && should_use_dmub_lock(pipe->stream->link);

	if (use_dmub) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
					lock,
					&hw_locks,
					&inst_flags);
	} else {
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
	}
}
2228 
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)2229 static bool wait_for_reset_trigger_to_occur(
2230 	struct dc_context *dc_ctx,
2231 	struct timing_generator *tg)
2232 {
2233 	bool rc = false;
2234 
2235 	DC_LOGGER_INIT(dc_ctx->logger);
2236 
2237 	/* To avoid endless loop we wait at most
2238 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2239 	const uint32_t frames_to_wait_on_triggered_reset = 10;
2240 	int i;
2241 
2242 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2243 
2244 		if (!tg->funcs->is_counter_moving(tg)) {
2245 			DC_ERROR("TG counter is not moving!\n");
2246 			break;
2247 		}
2248 
2249 		if (tg->funcs->did_triggered_reset_occur(tg)) {
2250 			rc = true;
2251 			/* usually occurs at i=1 */
2252 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2253 					i);
2254 			break;
2255 		}
2256 
2257 		/* Wait for one frame. */
2258 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2259 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2260 	}
2261 
2262 	if (false == rc)
2263 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2264 
2265 	return rc;
2266 }
2267 
/*
 * reduceSizeAndFraction - Reduce a 64-bit fraction in place by trial
 * division with small primes (up to 997 - best effort, not a full GCD).
 *
 * @numerator:   in/out numerator of the fraction.
 * @denominator: in/out denominator of the fraction.
 * @check_uint32_boundary: when true, success additionally requires both
 *                         reduced values to fit in a uint32_t.
 *
 * Return: true when the reduction goal was met (always true when
 * @check_uint32_boundary is false), false otherwise.
 *
 * Fix: the function was declared to return uint64_t while producing a
 * boolean; its only caller compares the result against false, so the
 * return type is corrected to bool (backward compatible).
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool check_uint32_boundary)
{
	int i;
	bool ret = check_uint32_boundary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Done as soon as both values fit in 32 bits. */
		if (check_uint32_boundary &&
			num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}
		/* Divide this prime out of both values as long as it
		 * divides both exactly (div_u64_rem: 64/32-safe division). */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2317 
is_low_refresh_rate(struct pipe_ctx * pipe)2318 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2319 {
2320 	uint32_t master_pipe_refresh_rate =
2321 		pipe->stream->timing.pix_clk_100hz * 100 /
2322 		pipe->stream->timing.h_total /
2323 		pipe->stream->timing.v_total;
2324 	return master_pipe_refresh_rate <= 30;
2325 }
2326 
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2327 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2328 				 bool account_low_refresh_rate)
2329 {
2330 	uint32_t clock_divider = 1;
2331 	uint32_t numpipes = 1;
2332 
2333 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2334 		clock_divider *= 2;
2335 
2336 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2337 		clock_divider *= 2;
2338 
2339 	while (pipe->next_odm_pipe) {
2340 		pipe = pipe->next_odm_pipe;
2341 		numpipes++;
2342 	}
2343 	clock_divider *= numpipes;
2344 
2345 	return clock_divider;
2346 }
2347 
/*
 * dcn10_align_pixel_clocks - Override DP DTOs so every pipe in the group
 * runs at a pixel clock commensurate with the embedded (eDP/LVDS) timing,
 * making vblank alignment possible.
 *
 * Return: index of the chosen master pipe, or -1 when alignment was not
 * set up (no DTO params configured, no override hook, or allocation
 * failure).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
				    struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk = 0;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	DC_LOGGER_INIT(dc_ctx->logger);

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* Unpack DTO params: bits [46:32] h_total, [62:48] v_total,
		 * [31:0] pixel clock in 100Hz units. */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* Pass 1: read back each pipe's HW timing/clock and compute a
		 * DTO phase/modulo ratio relative to the embedded timing. */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded display is the reference and master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*(uint64_t)100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Pass 2: program the DTO overrides and record the resulting
		 * pixel clocks; first overridden pipe becomes master if no
		 * embedded display was found. */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2437 
/*
 * dcn10_enable_vblanks_synchronization - Synchronize vblanks of all pipes
 * in the group to a master pipe.
 *
 * Aligns the group's DP DTOs first (dcn10_align_pixel_clocks), then asks
 * each slave TG to align its vblank to the master's.  Bails out if any
 * slave OTG is disabled.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	/* Temporarily enlarge the DPG vertical dimension on slave pipes.
	 * NOTE(review): presumably gives align_vblanks room to stretch a
	 * frame during alignment - confirm against OPP/OTG programming docs. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset per-stream sync state before attempting alignment. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the original DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2502 
/*
 * dcn10_enable_timing_synchronization - bring a group of OTGs into lockstep.
 *
 * Arms a reset trigger on every secondary OTG in @grouped_pipes so it resets
 * against grouped_pipes[0]'s OTG, waits for the trigger to fire, then disarms
 * it. While the reset is pending, each secondary pipe's DPG is programmed
 * with a doubled height (2*height + 1) and restored afterwards -- presumably
 * to mask artifacts during the reset; TODO confirm against OPP/DPG docs.
 *
 * SubVP phantom pipes are excluded at every step. Returns early (no sync)
 * if any OTG in the group is disabled.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	struct dc_state *state,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0;

	DC_LOGGER_INIT(dc_ctx->logger);

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Pre-reset: widen each secondary pipe's DPG window (skip phantoms). */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		/* Nothing to synchronize against if any OTG is off. */
		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Mark every non-phantom stream as not yet synchronized. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each secondary OTG to reset off grouped_pipes[0]'s OTG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the triggers now that the reset has happened. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG dimensions.
	 * NOTE(review): unlike the loops above, this one queries the subvp
	 * type without first checking grouped_pipes[i]->stream for NULL --
	 * confirm dc_state_get_pipe_subvp_type() tolerates stream-less pipes.
	 */
	for (i = 1; i < group_size; i++) {
		if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2585 
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2586 void dcn10_enable_per_frame_crtc_position_reset(
2587 	struct dc *dc,
2588 	int group_size,
2589 	struct pipe_ctx *grouped_pipes[])
2590 {
2591 	struct dc_context *dc_ctx = dc->ctx;
2592 	int i;
2593 
2594 	DC_LOGGER_INIT(dc_ctx->logger);
2595 
2596 	DC_SYNC_INFO("Setting up\n");
2597 	for (i = 0; i < group_size; i++)
2598 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2599 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2600 					grouped_pipes[i]->stream_res.tg,
2601 					0,
2602 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2603 
2604 	DC_SYNC_INFO("Waiting for trigger\n");
2605 
2606 	for (i = 0; i < group_size; i++)
2607 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2608 
2609 	DC_SYNC_INFO("Multi-display sync is complete\n");
2610 }
2611 
/*
 * Read the MC system aperture registers and convert them into the
 * vm_system_aperture_param layout consumed by the HUBP.
 *
 * @hubp1 is unused here; the REG_GET macros resolve registers through @hws.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Page number -> byte address (4 KiB pages, hence << 12). */
	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	/* NOTE(review): the << 18 scaling for the aperture bounds mirrors the
	 * register encoding -- confirm against the MC register spec.
	 */
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2635 
/* Temporary read settings, future will get values from kmd directly */
/*
 * Read the VM context0 page-table registers into @vm0, translating the
 * page-table base from UMA space into the frame-buffer address space
 * (see the in-body comment for the exact formula).
 *
 * @hubp1 is unused here; the REG_GET macros resolve registers through @hws.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* FB base/offset registers hold bits [..:24] of the address (<< 24). */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2680 
2681 
/*
 * Mirror the current MMHUB system-aperture and VM context0 configuration
 * into @hubp so the display HUBP can translate GPUVM addresses.
 */
static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	struct vm_system_aperture_param apt = {0};
	struct vm_context0_param vm0 = {0};

	/* Snapshot the current MMHUB register state... */
	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);

	/* ...and program the equivalent settings into the HUBP. */
	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
2694 
/*
 * dcn10_enable_plane - power up and clock a plane's pipe resources.
 *
 * Ungates power for the HUBP, enables its DCFCLK and the OPP pipe clock,
 * and (for GPUVM configs) programs the HUBP's PTE/aperture settings.
 * Also re-arms the flip interrupt for the top pipe when the plane wants it.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Roll back the DEGVIDCN10_253 workaround before enabling the plane. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Flip interrupt is only set on the top pipe of the tree. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2733 
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2734 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2735 {
2736 	int i = 0;
2737 	struct dpp_grph_csc_adjustment adjust;
2738 	memset(&adjust, 0, sizeof(adjust));
2739 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2740 
2741 
2742 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2743 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2744 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2745 			adjust.temperature_matrix[i] =
2746 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2747 	} else if (pipe_ctx->plane_state &&
2748 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2749 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2750 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2751 			adjust.temperature_matrix[i] =
2752 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2753 	}
2754 
2755 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2756 }
2757 
2758 
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2759 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2760 {
2761 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2762 		if (pipe_ctx->top_pipe) {
2763 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2764 
2765 			while (top->top_pipe)
2766 				top = top->top_pipe; // Traverse to top pipe_ctx
2767 			if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha)
2768 				// Global alpha used by top plane for PIP overlay
2769 				// Pre-multiplied/per-pixel alpha used by MPO
2770 				// Check top plane's global alpha to ensure layer_index > 0 not caused by PIP
2771 				return true; // MPO in use and front plane not hidden
2772 		}
2773 	}
2774 	return false;
2775 }
2776 
/*
 * Program the output CSC with the RGB bias terms (matrix[3/7/11]) forced to
 * zero, then restore them. The caller guarantees all three bias entries
 * carry the same value, so saving matrix[3] alone is sufficient.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	// Override rear plane RGB bias to fix MPO brightness
	static const int bias_idx[] = { 3, 7, 11 };
	uint16_t saved_bias = matrix[3];
	int i;

	for (i = 0; i < 3; i++)
		matrix[bias_idx[i]] = 0;

	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);

	for (i = 0; i < 3; i++)
		matrix[bias_idx[i]] = saved_bias;
}
2790 
/*
 * dcn10_program_output_csc - program the DPP output color space conversion.
 *
 * With no stream adjustment enabled, falls back to the colorspace default.
 * Otherwise programs @matrix, applying the rear-plane MPO bias fix when
 * required (see dcn10_is_rear_mpo_fix_required()).
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int16_t rgb_bias;

	if (!pipe_ctx->stream->csc_color_matrix.enable_adjustment) {
		if (dpp->funcs->dpp_set_csc_default != NULL)
			dpp->funcs->dpp_set_csc_default(dpp, colorspace);
		return;
	}

	if (dpp->funcs->dpp_set_csc_adjustment == NULL)
		return;

	/* MPO is broken with RGB colorspaces when OCSC matrix
	 * brightness offset >= 0 on DCN1 due to OCSC before MPC
	 * Blending adds offsets from front + rear to rear plane
	 *
	 * Fix is to set RGB bias to 0 on rear plane, top plane
	 * black value pixels add offset instead of rear + front
	 */

	// matrix[3/7/11] are all the same offset value
	rgb_bias = matrix[3];

	if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace))
		dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
	else
		dpp->funcs->dpp_set_csc_adjustment(dpp, matrix);
}
2822 
/*
 * Program the DPP input stage for @plane_state: input CSC for the plane's
 * pixel format/color space, then the prescale bias-and-scale registers.
 */
static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
	struct dc_bias_and_scale bns_params = {0};

	// program the input csc
	dpp->funcs->dpp_setup(dpp,
			plane_state->format,
			EXPANSION_MODE_ZERO,
			plane_state->input_csc_color_matrix,
			plane_state->color_space,
			NULL);

	//set scale and bias registers
	build_prescale_params(&bns_params, plane_state);
	if (dpp->funcs->dpp_program_bias_and_scale)
		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
2840 
dcn10_update_visual_confirm_color(struct dc * dc,struct pipe_ctx * pipe_ctx,int mpcc_id)2841 void dcn10_update_visual_confirm_color(struct dc *dc,
2842 		struct pipe_ctx *pipe_ctx,
2843 		int mpcc_id)
2844 {
2845 	struct mpc *mpc = dc->res_pool->mpc;
2846 
2847 	if (mpc->funcs->set_bg_color) {
2848 		mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2849 	}
2850 }
2851 
/*
 * dcn10_update_mpcc - (re)build this pipe's blending configuration in the
 * MPC tree.
 *
 * Derives the blend config from the plane's alpha settings, then either
 * updates the existing MPCC in place (non-full updates) or removes and
 * re-inserts the plane into the OPP's MPC tree (full updates).
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only matters when there is a pipe below to blend with. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		/* Combine per-pixel alpha with global gain when both are set. */
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2929 
update_scaler(struct pipe_ctx * pipe_ctx)2930 static void update_scaler(struct pipe_ctx *pipe_ctx)
2931 {
2932 	bool per_pixel_alpha =
2933 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2934 
2935 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2936 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2937 	/* scaler configuration */
2938 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2939 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2940 }
2941 
/*
 * dcn10_update_dchubp_dpp - reprogram HUBP and DPP state for a plane.
 *
 * Applies only the programming stages whose update_flags bits are set:
 * DPP clock/DTO (full update), HUBP DLG/TTU/RQ setup (full update), input
 * CSC + prescale, MPCC blending, scaler, viewport, cursor, gamut/output CSC,
 * and finally the surface config and address. Unblanks the HUBP if the pipe
 * tree is visible.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	/* DCN10 has no surface-format compat levels; always passed as 0. */
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp,  change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 *                dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* Surface size tracks the (possibly scaled) viewport, not plane_size. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Re-apply cursor state whenever a cursor surface is attached. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_attribute(pipe_ctx);
		dc->hwss.set_cursor_position(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	dc->hwss.update_plane_addr(dc, pipe_ctx);

	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
3103 
/*
 * dcn10_blank_pixel_data - blank or unblank the OTG for a pipe.
 *
 * Programs the OTG blank color from the stream's output color space, then
 * either unblanks and restores the ABM level, or disables ABM and blanks
 * after waiting for VBLANK (so the blank takes effect on a frame boundary).
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank first, then restore ABM for the stream. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Kill ABM before blanking so it does not fight the blank. */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
3147 
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)3148 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
3149 {
3150 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
3151 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
3152 	struct custom_float_format fmt;
3153 
3154 	fmt.exponenta_bits = 6;
3155 	fmt.mantissa_bits = 12;
3156 	fmt.sign = true;
3157 
3158 
3159 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
3160 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
3161 
3162 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
3163 			pipe_ctx->plane_res.dpp, hw_mult);
3164 }
3165 
/*
 * dcn10_program_pipe - program one pipe for the given state.
 *
 * Top pipes additionally get global sync, VTG, vupdate interrupt and
 * blank programming. Then the HUBP/DPP are updated, the HDR multiplier is
 * applied, and the input/output transfer functions are programmed as the
 * plane's update flags require.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width,
				pipe_ctx->pipe_dlg_param.pstate_keepout);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
3214 
dcn10_wait_for_pending_cleared(struct dc * dc,struct dc_state * context)3215 void dcn10_wait_for_pending_cleared(struct dc *dc,
3216 		struct dc_state *context)
3217 {
3218 		struct pipe_ctx *pipe_ctx;
3219 		struct timing_generator *tg;
3220 		int i;
3221 
3222 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
3223 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
3224 			tg = pipe_ctx->stream_res.tg;
3225 
3226 			/*
3227 			 * Only wait for top pipe's tg penindg bit
3228 			 * Also skip if pipe is disabled.
3229 			 */
3230 			if (pipe_ctx->top_pipe ||
3231 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
3232 			    !tg->funcs->is_tg_enabled(tg))
3233 				continue;
3234 
3235 			/*
3236 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3237 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
3238 			 * seems to not trigger the update right away, and if we
3239 			 * lock again before VUPDATE then we don't get a separated
3240 			 * operation.
3241 			 */
3242 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3243 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3244 		}
3245 }
3246 
dcn10_post_unlock_program_front_end(struct dc * dc,struct dc_state * context)3247 void dcn10_post_unlock_program_front_end(
3248 		struct dc *dc,
3249 		struct dc_state *context)
3250 {
3251 	int i;
3252 
3253 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3254 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3255 
3256 		if (!pipe_ctx->top_pipe &&
3257 			!pipe_ctx->prev_odm_pipe &&
3258 			pipe_ctx->stream) {
3259 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
3260 
3261 			if (context->stream_status[i].plane_count == 0)
3262 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3263 		}
3264 	}
3265 
3266 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3267 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3268 			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3269 
3270 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3271 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3272 			dc->hwss.optimize_bandwidth(dc, context);
3273 			break;
3274 		}
3275 
3276 	if (dc->hwseq->wa.DEGVIDCN10_254)
3277 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3278 }
3279 
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3280 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3281 {
3282 	uint8_t i;
3283 
3284 	for (i = 0; i < context->stream_count; i++) {
3285 		if (context->streams[i]->timing.timing_3d_format
3286 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3287 			/*
3288 			 * Disable stutter
3289 			 */
3290 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3291 			break;
3292 		}
3293 	}
3294 }
3295 
/*
 * dcn10_prepare_bandwidth - raise clocks/watermarks before applying a state.
 *
 * Updates clocks for the new context (non-lowering pass), programs safe
 * watermarks, and records whether a later optimize pass is needed in
 * dc->wm_optimized_required. Also applies the stereo frame-pack stutter
 * workaround and, in override mode, re-reports WM ranges to pplib.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock can be dropped entirely. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	/* safe_to_lower = false: only raise clocks in the prepare phase. */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3333 
/*
 * dcn10_optimize_bandwidth - lower clocks/watermarks after a state is applied.
 *
 * Mirror of dcn10_prepare_bandwidth() with safe_to_lower = true, so clocks
 * may now be reduced to match the committed context. The watermark
 * programming result is intentionally ignored here (no further optimize
 * pass follows).
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock can be dropped entirely. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	/* safe_to_lower = true: clocks may be decreased now. */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3372 
/*
 * dcn10_set_drr - Program DRR (variable refresh) vertical-total limits.
 *
 * @pipe_ctx: array of pipe contexts to program
 * @num_pipes: number of entries in @pipe_ctx
 * @adjust: requested min/mid/max vertical totals
 *
 * Applies the adjusted vertical totals to each pipe's OTG and, when a DRR
 * range is active (both v_total_min and v_total_max non-zero), arms static
 * screen event 0x800 (OTG_TRIG_A) for manual trigger control.
 */
void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
	unsigned int event_triggers = 0x800;
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
	/* TODO: If multiple pipes are to be supported, you need
	 * some GSL stuff. Static screen triggers may be programmed differently
	 * as well.
	 */
	for (i = 0; i < num_pipes; i++) {
		/* dc_state_destruct() might null the stream resources, so fetch tg
		 * here first to avoid a race condition. The lifetime of the pointee
		 * itself (the timing_generator object) is not a problem here.
		 */
		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;

		if ((tg != NULL) && tg->funcs) {
			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (tg->funcs->set_static_screen_control)
					tg->funcs->set_static_screen_control(
						tg, event_triggers, num_frames);
		}
	}
}
3407 
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3408 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3409 		int num_pipes,
3410 		struct crtc_position *position)
3411 {
3412 	int i = 0;
3413 
3414 	/* TODO: handle pipes > 1
3415 	 */
3416 	for (i = 0; i < num_pipes; i++)
3417 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3418 }
3419 
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3420 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3421 		int num_pipes, const struct dc_static_screen_params *params)
3422 {
3423 	unsigned int i;
3424 	unsigned int triggers = 0;
3425 
3426 	if (params->triggers.surface_update)
3427 		triggers |= 0x80;
3428 	if (params->triggers.cursor_update)
3429 		triggers |= 0x2;
3430 	if (params->triggers.force_trigger)
3431 		triggers |= 0x1;
3432 
3433 	for (i = 0; i < num_pipes; i++)
3434 		pipe_ctx[i]->stream_res.tg->funcs->
3435 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3436 					triggers, params->num_frames);
3437 }
3438 
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3439 static void dcn10_config_stereo_parameters(
3440 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3441 {
3442 	enum view_3d_format view_format = stream->view_format;
3443 	enum dc_timing_3d_format timing_3d_format =\
3444 			stream->timing.timing_3d_format;
3445 	bool non_stereo_timing = false;
3446 
3447 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3448 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3449 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3450 		non_stereo_timing = true;
3451 
3452 	if (non_stereo_timing == false &&
3453 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3454 
3455 		flags->PROGRAM_STEREO         = 1;
3456 		flags->PROGRAM_POLARITY       = 1;
3457 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3458 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3459 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3460 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3461 
3462 			if (stream->link && stream->link->ddc) {
3463 				enum display_dongle_type dongle = \
3464 						stream->link->ddc->dongle_type;
3465 
3466 				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3467 					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3468 					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3469 					flags->DISABLE_STEREO_DP_SYNC = 1;
3470 			}
3471 		}
3472 		flags->RIGHT_EYE_POLARITY =\
3473 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3474 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3475 			flags->FRAME_PACKED = 1;
3476 	}
3477 
3478 	return;
3479 }
3480 
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3481 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3482 {
3483 	struct crtc_stereo_flags flags = { 0 };
3484 	struct dc_stream_state *stream = pipe_ctx->stream;
3485 
3486 	dcn10_config_stereo_parameters(stream, &flags);
3487 
3488 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3489 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3490 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3491 	} else {
3492 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3493 	}
3494 
3495 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3496 		pipe_ctx->stream_res.opp,
3497 		flags.PROGRAM_STEREO == 1,
3498 		&stream->timing);
3499 
3500 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3501 		pipe_ctx->stream_res.tg,
3502 		&stream->timing,
3503 		&flags);
3504 
3505 	return;
3506 }
3507 
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3508 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3509 {
3510 	int i;
3511 
3512 	for (i = 0; i < res_pool->pipe_count; i++) {
3513 		if (res_pool->hubps[i]->inst == mpcc_inst)
3514 			return res_pool->hubps[i];
3515 	}
3516 	ASSERT(false);
3517 	return NULL;
3518 }
3519 
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3520 void dcn10_wait_for_mpcc_disconnect(
3521 		struct dc *dc,
3522 		struct resource_pool *res_pool,
3523 		struct pipe_ctx *pipe_ctx)
3524 {
3525 	struct dce_hwseq *hws = dc->hwseq;
3526 	int mpcc_inst;
3527 
3528 	if (dc->debug.sanity_checks) {
3529 		hws->funcs.verify_allow_pstate_change_high(dc);
3530 	}
3531 
3532 	if (!pipe_ctx->stream_res.opp)
3533 		return;
3534 
3535 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3536 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3537 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3538 
3539 			if (pipe_ctx->stream_res.tg &&
3540 				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3541 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3542 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3543 			hubp->funcs->set_blank(hubp, true);
3544 		}
3545 	}
3546 
3547 	if (dc->debug.sanity_checks) {
3548 		hws->funcs.verify_allow_pstate_change_high(dc);
3549 	}
3550 
3551 }
3552 
/*
 * No-op display power gating stub; always reports success. Used where pipe
 * power gating is handled elsewhere (or not at all) for this HW generation.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3561 
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3562 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3563 {
3564 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3565 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3566 	bool flip_pending;
3567 	struct dc *dc = pipe_ctx->stream->ctx->dc;
3568 
3569 	if (plane_state == NULL)
3570 		return;
3571 
3572 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3573 					pipe_ctx->plane_res.hubp);
3574 
3575 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3576 
3577 	if (!flip_pending)
3578 		plane_state->status.current_address = plane_state->status.requested_address;
3579 
3580 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3581 			tg->funcs->is_stereo_left_eye) {
3582 		plane_state->status.is_right_eye =
3583 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3584 	}
3585 
3586 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3587 		struct dce_hwseq *hwseq = dc->hwseq;
3588 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3589 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3590 
3591 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3592 			struct hubbub *hubbub = dc->res_pool->hubbub;
3593 
3594 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3595 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3596 		}
3597 	}
3598 }
3599 
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3600 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3601 {
3602 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3603 
3604 	/* In DCN, this programming sequence is owned by the hubbub */
3605 	hubbub->funcs->update_dchub(hubbub, dh_data);
3606 }
3607 
/*
 * dcn10_set_cursor_position - Program the HW cursor position for a pipe.
 *
 * Translates the stream-space cursor position into plane space - accounting
 * for scaling, source-rect clipping, rotation, mirroring, pipe split and
 * ODM combine - and programs the result into both HUBP and DPP.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream,
	};
	bool pipe_split_on = false;
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/* A top/bottom pipe whose viewport differs from the full source rect
	 * indicates pipe split: each half scans only part of the surface.
	 */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			pipe_split_on = true;
		}
	}

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* 90/270: source width/height are swapped relative to dest. */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* Video progressive surfaces never show the HW cursor. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;


	if (param.rotation == ROTATION_ANGLE_0) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		/* Horizontal mirror: reflect x across the viewport. */
		if (param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					/* NOTE(review): mixed signed/unsigned
					 * comparisons below - pos_cpy.x is
					 * uint32_t; behavior kept as-is.
					 */
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = 2 * viewport_width - temp_x;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}
	}
	// Swap axis and mirror horizontally
	else if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 *  pos_cpy.y as the 180 degree rotation case below,
		 *  but use pos_cpy.x as our input because we are rotating
		 *  270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			/* Pick the sibling pipe's viewport.y: bottom/top for
			 * pipe split, next/prev for ODM combine.
			 */
			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		/* 180 degrees without mirror flips x; with mirror the two
		 * horizontal reflections cancel, so x stays untouched.
		 */
		if (!param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = temp_x + viewport_width;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	/* Program the final position into both cursor-capable blocks. */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3846 
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3847 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3848 {
3849 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3850 
3851 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3852 			pipe_ctx->plane_res.hubp, attributes);
3853 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3854 		pipe_ctx->plane_res.dpp, attributes);
3855 }
3856 
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3857 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3858 {
3859 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3860 	struct fixed31_32 multiplier;
3861 	struct dpp_cursor_attributes opt_attr = { 0 };
3862 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3863 	struct custom_float_format fmt;
3864 
3865 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3866 		return;
3867 
3868 	fmt.exponenta_bits = 5;
3869 	fmt.mantissa_bits = 10;
3870 	fmt.sign = true;
3871 
3872 	if (sdr_white_level > 80) {
3873 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3874 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3875 	}
3876 
3877 	opt_attr.scale = hw_scale;
3878 	opt_attr.bias = 0;
3879 
3880 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3881 			pipe_ctx->plane_res.dpp, &opt_attr);
3882 }
3883 
3884 /*
3885  * apply_front_porch_workaround  TODO FPGA still need?
3886  *
3887  * This is a workaround for a bug that has existed since R5xx and has not been
3888  * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3889  */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3890 static void apply_front_porch_workaround(
3891 	struct dc_crtc_timing *timing)
3892 {
3893 	if (timing->flags.INTERLACE == 1) {
3894 		if (timing->v_front_porch < 2)
3895 			timing->v_front_porch = 2;
3896 	} else {
3897 		if (timing->v_front_porch < 1)
3898 			timing->v_front_porch = 1;
3899 	}
3900 }
3901 
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3902 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3903 {
3904 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3905 	struct dc_crtc_timing patched_crtc_timing;
3906 	int vesa_sync_start;
3907 	int asic_blank_end;
3908 	int interlace_factor;
3909 
3910 	patched_crtc_timing = *dc_crtc_timing;
3911 	apply_front_porch_workaround(&patched_crtc_timing);
3912 
3913 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3914 
3915 	vesa_sync_start = patched_crtc_timing.v_addressable +
3916 			patched_crtc_timing.v_border_bottom +
3917 			patched_crtc_timing.v_front_porch;
3918 
3919 	asic_blank_end = (patched_crtc_timing.v_total -
3920 			vesa_sync_start -
3921 			patched_crtc_timing.v_border_top)
3922 			* interlace_factor;
3923 
3924 	return asic_blank_end -
3925 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3926 }
3927 
/*
 * dcn10_calc_vupdate_position - Compute the scan-line window around VUPDATE.
 *
 * Converts the (possibly negative) VUPDATE offset from VSYNC into a start
 * line wrapped into the frame and a two-line window end, both modulo
 * v_total.
 */
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (vupdate_pos >= 0)
		/* Equivalent to vupdate_pos % v_total for non-negative input. */
		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
	else
		/* Wrap a negative offset into the frame. NOTE(review): the
		 * trailing "- 1" lands one line earlier than a plain modulo
		 * would - presumably intentional; confirm against the OTG
		 * interrupt programming requirements.
		 */
		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
	*end_line = (*start_line + 2) % timing->v_total;
}
3943 
/*
 * dcn10_cal_vline_position - Resolve a periodic-interrupt line window.
 *
 * Interprets the stream's periodic_interrupt request either relative to
 * VUPDATE (START_V_UPDATE) or relative to VSYNC (START_V_SYNC) and produces
 * a start line wrapped into the frame plus a two-line window end.
 */
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
		/* Pull the requested offset one line toward VUPDATE before
		 * adding the VUPDATE position itself.
		 */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		/* Same wrap-into-frame arithmetic as
		 * dcn10_calc_vupdate_position().
		 */
		if (vline_pos >= 0)
			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
		// vsync is line 0 so start_line is just the requested line offset
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
	} else
		/* Unknown reference point; leave outputs untouched. */
		ASSERT(0);
}
3972 
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3973 void dcn10_setup_periodic_interrupt(
3974 		struct dc *dc,
3975 		struct pipe_ctx *pipe_ctx)
3976 {
3977 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3978 	uint32_t start_line = 0;
3979 	uint32_t end_line = 0;
3980 
3981 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3982 
3983 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3984 }
3985 
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3986 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3987 {
3988 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3989 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3990 
3991 	if (start_line < 0) {
3992 		ASSERT(0);
3993 		start_line = 0;
3994 	}
3995 
3996 	if (tg->funcs->setup_vertical_interrupt2)
3997 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3998 }
3999 
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)4000 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
4001 		struct dc_link_settings *link_settings)
4002 {
4003 	struct encoder_unblank_param params = {0};
4004 	struct dc_stream_state *stream = pipe_ctx->stream;
4005 	struct dc_link *link = stream->link;
4006 	struct dce_hwseq *hws = link->dc->hwseq;
4007 
4008 	/* only 3 items below are used by unblank */
4009 	params.timing = pipe_ctx->stream->timing;
4010 
4011 	params.link_settings.link_rate = link_settings->link_rate;
4012 
4013 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
4014 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
4015 			params.timing.pix_clk_100hz /= 2;
4016 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
4017 	}
4018 
4019 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
4020 		hws->funcs.edp_backlight_control(link, true);
4021 	}
4022 }
4023 
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)4024 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
4025 				const uint8_t *custom_sdp_message,
4026 				unsigned int sdp_message_size)
4027 {
4028 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
4029 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
4030 				pipe_ctx->stream_res.stream_enc,
4031 				custom_sdp_message,
4032 				sdp_message_size);
4033 	}
4034 }
/*
 * Validate a requested DISPCLK/DPPCLK frequency against the clock manager's
 * reported envelope, record it in the current state and push the update.
 *
 * Returns DC_OK on success, or a DC_FAIL_* status when the request is
 * unsupported or out of range. @stepping is currently unused.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
			enum dc_clock_type clock_type,
			uint32_t clk_khz,
			uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
		context, clock_type, &clock_cfg);

	/* Reject requests outside the reported clock envelope. */
	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/* Record the request so the clock update below applies it. */
	switch (clock_type) {
	case DC_CLOCK_TYPE_DISPCLK:
		current_clocks->dispclk_khz = clk_khz;
		break;
	case DC_CLOCK_TYPE_DPPCLK:
		current_clocks->dppclk_khz = clk_khz;
		break;
	default:
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);

	return DC_OK;
}
4073 
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)4074 void dcn10_get_clock(struct dc *dc,
4075 			enum dc_clock_type clock_type,
4076 			struct dc_clock_config *clock_cfg)
4077 {
4078 	struct dc_state *context = dc->current_state;
4079 
4080 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
4081 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
4082 
4083 }
4084 
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)4085 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
4086 {
4087 	struct resource_pool *pool = dc->res_pool;
4088 	int i;
4089 
4090 	for (i = 0; i < pool->pipe_count; i++) {
4091 		struct hubp *hubp = pool->hubps[i];
4092 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
4093 
4094 		hubp->funcs->hubp_read_state(hubp);
4095 
4096 		if (!s->blank_en)
4097 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
4098 	}
4099 }
4100 
4101 /**
4102  * dcn10_reset_surface_dcc_and_tiling - Set DCC and tiling in DCN to their disable mode.
4103  *
4104  * @pipe_ctx: Pointer to the pipe context structure.
4105  * @plane_state: Surface state
4106  * @clear_tiling: If true set tiling to Linear, otherwise does not change tiling
4107  *
4108  * This function is responsible for call the HUBP block to disable DCC and set
4109  * tiling to the linear mode.
4110  */
dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx * pipe_ctx,struct dc_plane_state * plane_state,bool clear_tiling)4111 void dcn10_reset_surface_dcc_and_tiling(struct pipe_ctx *pipe_ctx,
4112 					struct dc_plane_state *plane_state,
4113 					bool clear_tiling)
4114 {
4115 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
4116 
4117 	if (!hubp)
4118 		return;
4119 
4120 	/* if framebuffer is tiled, disable tiling */
4121 	if (clear_tiling && hubp->funcs->hubp_clear_tiling)
4122 		hubp->funcs->hubp_clear_tiling(hubp);
4123 
4124 	/* force page flip to see the new content of the framebuffer */
4125 	hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
4126 							&plane_state->address,
4127 							true);
4128 }
4129