xref: /linux/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce) !
1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4 
5 
6 #include "os_types.h"
7 #include "dm_services.h"
8 #include "basics/dc_common.h"
9 #include "dm_helpers.h"
10 #include "core_types.h"
11 #include "resource.h"
12 #include "dccg.h"
13 #include "dce/dce_hwseq.h"
14 #include "reg_helper.h"
15 #include "abm.h"
16 #include "hubp.h"
17 #include "dchubbub.h"
18 #include "timing_generator.h"
19 #include "opp.h"
20 #include "ipp.h"
21 #include "mpc.h"
22 #include "mcif_wb.h"
23 #include "dc_dmub_srv.h"
24 #include "link_hwss.h"
25 #include "dpcd_defs.h"
26 #include "clk_mgr.h"
27 #include "dsc.h"
28 #include "link.h"
29 
30 #include "dce/dmub_hw_lock_mgr.h"
31 #include "dcn10/dcn10_cm_common.h"
32 #include "dcn20/dcn20_optc.h"
33 #include "dcn30/dcn30_cm_common.h"
34 #include "dcn32/dcn32_hwseq.h"
35 #include "dcn401_hwseq.h"
36 #include "dcn401/dcn401_resource.h"
37 #include "dc_state_priv.h"
38 #include "link_enc_cfg.h"
39 
40 #define DC_LOGGER_INIT(logger)
41 
42 #define CTX \
43 	hws->ctx
44 #define REG(reg)\
45 	hws->regs->reg
46 #define DC_LOGGER \
47 	dc->ctx->logger
48 
49 
50 #undef FN
51 #define FN(reg_name, field_name) \
52 	hws->shifts->field_name, hws->masks->field_name
53 
dcn401_initialize_min_clocks(struct dc * dc)54 void dcn401_initialize_min_clocks(struct dc *dc)
55 {
56 	struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
57 
58 	clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
59 	clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
60 	clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
61 	clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
62 	clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
63 	if (dc->debug.disable_boot_optimizations) {
64 		clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
65 	} else {
66 		/* Even though DPG_EN = 1 for the connected display, it still requires the
67 		 * correct timing so we cannot set DISPCLK to min freq or it could cause
68 		 * audio corruption. Read current DISPCLK from DENTIST and request the same
69 		 * freq to ensure that the timing is valid and unchanged.
70 		 */
71 		clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
72 	}
73 	clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
74 	clocks->fclk_p_state_change_support = true;
75 	clocks->p_state_change_support = true;
76 
77 	dc->clk_mgr->funcs->update_clocks(
78 			dc->clk_mgr,
79 			dc->current_state,
80 			true);
81 }
82 
dcn401_program_gamut_remap(struct pipe_ctx * pipe_ctx)83 void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
84 {
85 	unsigned int i = 0;
86 	struct mpc_grph_gamut_adjustment mpc_adjust;
87 	unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
88 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
89 
90 	//For now assert if location is not pre-blend
91 	if (pipe_ctx->plane_state)
92 		ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);
93 
94 	// program MPCC_MCM_FIRST_GAMUT_REMAP
95 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
96 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
97 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP;
98 
99 	if (pipe_ctx->plane_state &&
100 		pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
101 		mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
102 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
103 			mpc_adjust.temperature_matrix[i] =
104 			pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
105 	}
106 
107 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
108 
109 	// program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
110 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
111 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;
112 
113 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
114 
115 	// program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x
116 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
117 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
118 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;
119 
120 	if (pipe_ctx->top_pipe == NULL) {
121 		if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
122 			mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
123 			for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
124 				mpc_adjust.temperature_matrix[i] =
125 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
126 		}
127 	}
128 
129 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
130 }
131 
/*
 * dcn401_init_hw - one-time hardware bring-up for DCN4.01.
 *
 * Runs on driver load / resume: clocks, DCCG, memory power states, link
 * encoders, audio, backlight/ABM, clock gating and DMCUB capability
 * discovery. The ordering of the stages below is deliberate; do not
 * reorder without checking the HW sequencing requirements.
 */
void dcn401_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	/* dchub ref frequency (MHz) captured before it is re-derived below;
	 * used at the end to detect a change requiring a bounding-box update. */
	int current_dchub_ref_freq = 0;

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

		// mark dcmode limits present if any clock has distinct AC and DC values from SMU
		dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
				dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Disable DMUB Initialization until IPS state programming is finalized
	//if (!dcb->funcs->is_accelerated_mode(dcb)) {
	//	hws->funcs.bios_golden_init(dc);
	//}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	/* Derive DCCG/DCHUB reference clocks from the BIOS crystal frequency;
	 * without valid firmware info there is no sane fallback. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 with hw default value on bootup, resume from s3
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disable boot optimizations means power down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn401_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			/* eDP present: blank backlight before powering down to
			 * avoid visible artifacts during the power-down. */
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			/* No eDP: a single power_down covers all links, so stop
			 * after the first active DIG is found. */
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Capture the backlight level restored by the last panel control that
	 * initializes; it seeds ABM init below. */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	dcn401_setup_hpo_hw_control(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		dc->debug.fams2_config.bits.enable &=
				dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
		if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
			|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
			/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
			if (dc->clk_mgr)
				dc->res_pool->funcs->update_bw_bounding_box(dc,
									    dc->clk_mgr->bw_params);
		}
	}
}
362 
/*
 * Derive enable/disable state for the MCM shaper, 3D LUT and 1D LUT from
 * the plane state attached to @pipe_ctx.
 *
 * If the pipe carries no plane state, the output parameters are left
 * untouched — the caller (dcn401_populate_mcm_luts) pre-initializes all
 * three to MCM_LUT_DISABLE. Note that despite the "get" in the name, this
 * helper also has a side effect: it forces the movable CM block to the
 * pre-blend location, both in the MPC hardware and in the plane state.
 */
static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
		enum MCM_LUT_XABLE *shaper_xable,
		enum MCM_LUT_XABLE *lut3d_xable,
		enum MCM_LUT_XABLE *lut1d_xable)
{
	enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
	bool lut1d_enable = false;
	struct mpc *mpc = dc->res_pool->mpc;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;

	if (!pipe_ctx->plane_state)
		return;
	shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
	lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;

	*lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;

	/* The three settings only ever enable the 3D LUT together with the
	 * shaper, never the 3D LUT alone. */
	switch (shaper_3dlut_setting) {
	case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
		*lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
		break;
	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
		*lut3d_xable = MCM_LUT_DISABLE;
		*shaper_xable = MCM_LUT_ENABLE;
		break;
	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
		*lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
		break;
	}
}
395 
/*
 * dcn401_populate_mcm_luts - program the MPC MCM 1D LUT, shaper and 3D LUT
 * for one pipe.
 *
 * The 3D LUT can come from system memory (written through the MPC register
 * interface) or from video memory (fast-loaded via the HUBP 3D-LUT
 * fast-load path); the VIDMEM case additionally configures the HUBP fetch
 * width, addressing mode, pixel format, bias/scale and component crossbar.
 *
 * @mcm_luts is passed by value (it is a small struct of pointers/params).
 * @lut_bank_a selects which of the two LUT RAM banks to program.
 */
void dcn401_populate_mcm_luts(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_cm2_func_luts mcm_luts,
		bool lut_bank_a)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int mpcc_id = hubp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	union mcm_lut_params m_lut_params;
	enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
	enum hubp_3dlut_fl_format format = 0;
	enum hubp_3dlut_fl_mode mode;
	enum hubp_3dlut_fl_width width = 0;
	enum hubp_3dlut_fl_addressing_mode addr_mode;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
	enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
	bool rval;

	dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);

	/* 1D LUT */
	if (mcm_luts.lut1d_func) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
		else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* translate curve points to HW PWL; on failure leave pwl
			 * NULL so the LUT stays unprogrammed/disabled */
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.lut1d_func,
					&dpp_base->regamma_params, false);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
		}
		/* enable only if both requested AND a valid PWL was produced */
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
	}

	/* Shaper */
	if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.shaper->pwl;
		else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* distributed points are not expected for the shaper here */
			ASSERT(false);
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.shaper,
					&dpp_base->regamma_params, true);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->mcm.populate_lut)
				mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
		}
	}

	/* 3DLUT */
	switch (lut3d_src) {
	case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		/* SYSMEM path does not use the HUBP fast-load engine */
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, false);

		if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
			m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
						mpcc_id);
		}
		break;
		case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
		switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
		case DC_CM2_GPU_MEM_SIZE_171717:
			width = hubp_3dlut_fl_width_17;
			break;
		case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
			width = hubp_3dlut_fl_width_transformed;
			break;
		default:
			//TODO: handle default case
			break;
		}

		//check for support
		if (mpc->funcs->mcm.is_config_supported &&
			!mpc->funcs->mcm.is_config_supported(width))
			break;

		if (mpc->funcs->program_lut_read_write_control)
			mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);

		if (hubp->funcs->hubp_program_3dlut_fl_addr)
			hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);

		if (mpc->funcs->mcm.program_bit_depth)
			mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);

		/* map the GPU memory layout onto the HUBP fetch mode/addressing */
		switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
			mode = hubp_3dlut_fl_mode_native_1;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
			mode = hubp_3dlut_fl_mode_native_2;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
			mode = hubp_3dlut_fl_mode_transform;
			addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
			break;
		default:
			mode = hubp_3dlut_fl_mode_disable;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_mode)
			hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);

		if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
			hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);

		switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
			format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
			format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
			format = hubp_3dlut_fl_format_float_fp1_5_10;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_format)
			hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
		/* bias/scale must go to both MPC and HUBP; only program when
		 * both hooks exist so the two sides stay consistent */
		if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
				mpc->funcs->mcm.program_bias_scale) {
			mpc->funcs->mcm.program_bias_scale(mpc,
				mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
				mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
				mpcc_id);
			hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
						mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
						mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
		}

		//navi 4x has a bug and r and blue are swapped and need to be worked around here in
		//TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
		switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
		case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
		default:
			crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
			crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
			crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
			break;
		}

		if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
			hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
					crossbar_bit_slice_cr_r,
					crossbar_bit_slice_y_g,
					crossbar_bit_slice_cb_b);

		if (mpc->funcs->mcm.program_lut_read_write_control)
			mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);

		if (mpc->funcs->mcm.program_3dlut_size)
			mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);

		if (mpc->funcs->update_3dlut_fast_load_select)
			mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);

		/* if the HUBP cannot fast-load, disable all MCM LUT stages rather
		 * than leave a partially programmed pipeline active */
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
		else {
			if (mpc->funcs->program_lut_mode) {
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
			}
		}
		break;

	}
}
592 
dcn401_trigger_3dlut_dma_load(struct dc * dc,struct pipe_ctx * pipe_ctx)593 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
594 {
595 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
596 
597 	if (hubp->funcs->hubp_enable_3dlut_fl) {
598 		hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
599 	}
600 }
601 
dcn401_set_mcm_luts(struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)602 bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
603 				const struct dc_plane_state *plane_state)
604 {
605 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
606 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
607 	struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
608 	struct mpc *mpc = dc->res_pool->mpc;
609 	bool result;
610 	const struct pwl_params *lut_params = NULL;
611 	bool rval;
612 
613 	if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
614 		dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
615 		return true;
616 	}
617 
618 	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
619 	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
620 	// 1D LUT
621 	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
622 		lut_params = &plane_state->blend_tf.pwl;
623 	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
624 		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
625 				&dpp_base->regamma_params, false);
626 		lut_params = rval ? &dpp_base->regamma_params : NULL;
627 	}
628 	result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
629 	lut_params = NULL;
630 
631 	// Shaper
632 	if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
633 		lut_params = &plane_state->in_shaper_func.pwl;
634 	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
635 		// TODO: dpp_base replace
636 		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
637 				&dpp_base->shaper_params, true);
638 		lut_params = rval ? &dpp_base->shaper_params : NULL;
639 	}
640 	result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
641 
642 	// 3D
643 	if (mpc->funcs->program_3dlut) {
644 		if (plane_state->lut3d_func.state.bits.initialized == 1)
645 			result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
646 		else
647 			result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
648 	}
649 
650 	return result;
651 }
652 
/*
 * Program the output (post-blend) transfer function for a stream.
 *
 * On the OPP head pipe, first tries the MPC shaper + 3D LUT path via
 * dcn32_set_mpc_shaper_3dlut(); only if that returns false is the stream's
 * output transfer function translated into PWL form for output gamma.
 * The final set_output_gamma call is made unconditionally: on non-head
 * pipes (or when the 3DLUT path was taken) params stays NULL —
 * NOTE(review): presumably a NULL params puts OGAM into bypass; confirm
 * against the set_output_gamma implementation.
 *
 * Returns the result of the shaper/3DLUT programming attempt.
 */
bool dcn401_set_output_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	const struct pwl_params *params = NULL;
	bool ret = false;

	/* program OGAM or 3DLUT only for the top pipe*/
	if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
		/*program shaper and 3dlut in MPC*/
		ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
		if (ret == false && mpc->funcs->set_output_gamma) {
			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
				params = &stream->out_transfer_func.pwl;
			else if (pipe_ctx->stream->out_transfer_func.type ==
					TF_TYPE_DISTRIBUTED_POINTS &&
					cm3_helper_translate_curve_to_hw_format(
					&stream->out_transfer_func,
					&mpc->blender_params, false))
				params = &mpc->blender_params;
			/* there are no ROM LUTs in OUTGAM */
			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
				BREAK_TO_DEBUGGER();
		}
	}

	if (mpc->funcs->set_output_gamma)
		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);

	return ret;
}
686 
dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx * pipe_ctx,unsigned int * tmds_div)687 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
688 				unsigned int *tmds_div)
689 {
690 	struct dc_stream_state *stream = pipe_ctx->stream;
691 
692 	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
693 		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
694 			*tmds_div = PIXEL_RATE_DIV_BY_2;
695 		else
696 			*tmds_div = PIXEL_RATE_DIV_BY_4;
697 	} else {
698 		*tmds_div = PIXEL_RATE_DIV_BY_1;
699 	}
700 
701 	if (*tmds_div == PIXEL_RATE_DIV_NA)
702 		ASSERT(false);
703 
704 }
705 
/*
 * Gather the per-stream values needed before enabling OTG timing:
 * the TMDS pixel-rate divider, the ODM OPP head instances, PHY symclk
 * bookkeeping, and the DRR vertical-total parameters plus the surface
 * update event trigger.
 *
 * NOTE(review): @manual_mode is accepted but never written by this helper.
 */
static void enable_stream_timing_calc(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc,
		unsigned int *tmds_div,
		int *opp_inst,
		int *opp_cnt,
		struct pipe_ctx *opp_heads[MAX_PIPES],
		bool *manual_mode,
		struct drr_params *params,
		unsigned int *event_triggers)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	int idx;

	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);

	*opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads);
	for (idx = 0; idx < *opp_cnt; idx++)
		opp_inst[idx] = opp_heads[idx]->stream_res.opp->inst;

	if (dc_is_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		stream->link->phy_state.symclk_state =
				(stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) ?
						SYMCLK_ON_TX_OFF : SYMCLK_ON_TX_ON;
	}

	params->vertical_total_min = stream->adjust.v_total_min;
	params->vertical_total_max = stream->adjust.v_total_max;
	params->vertical_total_mid = stream->adjust.v_total_mid;
	params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		*event_triggers = 0x80;
}
745 
/*
 * dcn401_enable_stream_timing() - Enable and program OTG timing for a stream.
 *
 * Runs only on OTG master pipes (returns DC_OK immediately otherwise).
 * Sequence: derive dividers/DRR parameters, program the pixel rate
 * divider and ODM combine, select the DTBCLK_P source, enable the OTG
 * clock, program the pixel clock, program the timing generator, enable
 * OPP clocks, blank pixel data, and finally enable the CRTC.
 *
 * Return: DC_OK on success; DC_ERROR_UNEXPECTED when pixel clock or
 * CRTC enable programming fails.
 */
enum dc_status dcn401_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = {0};
	struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
	struct dc_crtc_timing patched_crtc_timing = stream->timing;
	bool manual_mode = false;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	int odm_slice_width;
	int last_odm_slice_width;
	int i;

	/* Timing is programmed once per OTG; non-master pipes are skipped */
	if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
		return DC_OK;

	enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst,
			&opp_cnt, opp_heads, &manual_mode, &params, &event_triggers);

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst,
			tmds_div, unused_div);
	}

	/* TODO check if timing_changed, disable stream if timing changed */

	/* More than one OPP head on this OTG means ODM combine is in use */
	if (opp_cnt > 1) {
		odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
		last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	}

	/* set DTBCLK_P */
	if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
		if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
		}
	}

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Platform-specific workaround hook for non-DP signals */
	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

	/* if we are borrowing from hblank, h_addressable needs to be adjusted */
	if (dc->debug.enable_hblank_borrow)
		patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;

	pipe_ctx->stream_res.tg->funcs->program_timing(
		pipe_ctx->stream_res.tg,
		&patched_crtc_timing,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
		pipe_ctx->stream->signal,
		true);

	/* Enable clocks and left-edge extra pixel on every OPP head */
	for (i = 0; i < opp_cnt; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);

	/* Event triggers and num frames initialized for DRR, but can be
	 * later updated for PSR use. Note DRR trigger events are generated
	 * regardless of whether num frames met.
	 */
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers, 2);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	/* SubVP phantom pipes get additional post-enable OTG programming */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
		if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
			pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
	}

	return DC_OK;
}
876 
get_phyd32clk_src(struct dc_link * link)877 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
878 {
879 	switch (link->link_enc->transmitter) {
880 	case TRANSMITTER_UNIPHY_A:
881 		return PHYD32CLKA;
882 	case TRANSMITTER_UNIPHY_B:
883 		return PHYD32CLKB;
884 	case TRANSMITTER_UNIPHY_C:
885 		return PHYD32CLKC;
886 	case TRANSMITTER_UNIPHY_D:
887 		return PHYD32CLKD;
888 	case TRANSMITTER_UNIPHY_E:
889 		return PHYD32CLKE;
890 	default:
891 		return PHYD32CLKA;
892 	}
893 }
894 
/*
 * Derive the per-stream enable parameters: HPO stream encoder instance
 * (128b/132b only), PHYD32CLK source, TMDS pixel rate divider and the
 * early-control value (horizontal active width modulo lane count, used
 * to avoid corruption on DP monitors).
 */
static void dcn401_enable_stream_calc(
		struct pipe_ctx *pipe_ctx,
		int *dp_hpo_inst,
		enum phyd32clk_clock_source *phyd32clk,
		unsigned int *tmds_div,
		uint32_t *early_control)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	enum dc_lane_count lane_count =
			pipe_ctx->stream->link->cur_link_settings.lane_count;
	uint32_t h_active_with_borders;

	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;

	*phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);

	if (dc_is_tmds_signal(pipe_ctx->stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
	else
		*tmds_div = PIXEL_RATE_DIV_BY_1;

	/* enable early control to avoid corruption on DP monitor */
	h_active_with_borders = timing->h_addressable +
			timing->h_border_left +
			timing->h_border_right;

	if (lane_count != 0)
		*early_control = h_active_with_borders % lane_count;

	if (*early_control == 0)
		*early_control = lane_count;
}
932 
/*
 * dcn401_enable_stream() - Enable stream-level clocks and the stream encoder.
 *
 * Computes the enable parameters (HPO instance, PHYD32CLK source, TMDS
 * divider, early control), routes DP stream / SYMCLK32 SE / SYMCLK SE
 * clocks in DCCG according to the signal type, programs the pixel rate
 * divider, sets up the stream encoder, programs dmdata (unless the
 * plane uses immediate flips), updates info frames and finally writes
 * the early-control value into the timing generator.
 */
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
{
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;
	enum phyd32clk_clock_source phyd32clk;
	int dp_hpo_inst = 0;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	/* Without unified assignment, look the link encoder up from config */
	if (!dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);

	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
				&tmds_div, &early_control);

	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
			/* keep SYMCLK32 SE gated while the link rate is unknown */
			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
			} else {
				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
			}
		} else {
			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
		}
	}

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg,
			pipe_ctx->stream_res.tg->inst,
			tmds_div,
			unused_div);
	}

	link_hwss->setup_stream_encoder(pipe_ctx);

	/* dmdata programming is skipped for immediate flips */
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (dc->hwss.program_dmdata_engine)
			dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	tg->funcs->set_early_control(tg, early_control);
}
990 
/* Set or clear the HPO_IO_EN field in HPO_TOP_HW_CONTROL to gate HPO IO. */
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
995 
/*
 * Rescale the cursor X hotspot for 2x magnification when the cursor
 * crosses between slices: halve it, then add 1 for cursors up to 128
 * pixels wide and 2 for wider cursors.
 */
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
	pos_cpy->x_hotspot /= 2;
	pos_cpy->x_hotspot += (cursor_width <= 128) ? 1 : 2;
}
1006 
disable_link_output_symclk_on_tx_off(struct dc_link * link,enum dp_link_encoding link_encoding)1007 static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
1008 {
1009 	struct dc *dc = link->ctx->dc;
1010 	struct pipe_ctx *pipe_ctx = NULL;
1011 	uint8_t i;
1012 
1013 	for (i = 0; i < MAX_PIPES; i++) {
1014 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1015 		if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
1016 			pipe_ctx->clock_source->funcs->program_pix_clk(
1017 					pipe_ctx->clock_source,
1018 					&pipe_ctx->stream_res.pix_clk_params,
1019 					link_encoding,
1020 					&pipe_ctx->pll_settings);
1021 			break;
1022 		}
1023 	}
1024 }
1025 
/*
 * dcn401_disable_link_output() - Disable PHY output for a link.
 *
 * Sequence: turn the eDP backlight off (or lock the PHY through DMCU),
 * disable the link output -- keeping SYMCLK on with TX off when a TMDS
 * OTG still holds a SYMCLK reference -- then power the eDP panel off
 * (or unlock the PHY) and record the DPCD source trace event.
 */
void dcn401_disable_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_backlight_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->lock_phy(dmcu);

	/* For TMDS with an OTG still referencing SYMCLK, keep the symbol
	 * clock running with TX off rather than fully disabling the PHY.
	 */
	if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
		disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
		link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
	} else {
		link_hwss->disable_link_output(link, link_res, signal);
		link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
	}

	/* NOTE(review): this guard tests edp_backlight_control yet calls
	 * edp_power_control. Mirroring the first branch keeps the DMCU
	 * lock/unlock below paired, but it would NULL-deref if only
	 * edp_backlight_control were implemented -- confirm both hooks are
	 * always populated together.
	 */
	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_power_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		/* tests lock_phy (not unlock_phy) so the unlock pairs with
		 * the lock taken in the first branch above
		 */
		dmcu->funcs->unlock_phy(dmcu);

	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
1058 
/*
 * dcn401_set_cursor_position() - Program the HW cursor position for a pipe.
 *
 * Translates the cursor position from stream->src space into per-pipe
 * recout space (on DCN4 the cursor is composed after the scaler),
 * adjusting for ODM slices and MPC combine, folding negative
 * coordinates into the hotspot, and disabling the cursor when it lies
 * entirely outside this pipe's recout. Finally programs both the HUBP
 * and DPP cursor blocks.
 */
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.recout = pipe_ctx->plane_res.scl_data.recout,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream
	};
	struct rect odm_slice_src = { 0 };
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int prev_odm_width = 0;
	struct pipe_ctx *prev_odm_pipe = NULL;
	bool mpc_combine_on = false;
	int  bottom_pipe_x_pos = 0;

	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	int recout_x_pos = 0;
	int recout_y_pos = 0;

	/* MPC combine is inferred from a pipe split where the plane's source
	 * rect does not match this pipe's viewport
	 */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			mpc_combine_on = true;
		}
	}

	/* DCN4 moved cursor composition after Scaler, so in HW it is in
	 * recout space and for HW Cursor position programming need to
	 * translate to recout space.
	 *
	 * Cursor X and Y position programmed into HW can't be negative,
	 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot
	 * position that goes into HW X and Y coordinates while HW Hot spot
	 * X and Y coordinates are length relative to the cursor top left
	 * corner, hotspot must be smaller than the cursor size.
	 *
	 * DMs/DC interface for Cursor position is in stream->src space, and
	 * DMs supposed to transform Cursor coordinates to stream->src space,
	 * then here we need to translate Cursor coordinates to stream->dst
	 * space, as now in HW, Cursor coordinates are in per pipe recout
	 * space, and for the given pipe valid coordinates are only in range
	 * from 0,0 - recout width, recout height space.
	 * If certain pipe combining is in place, need to further adjust per
	 * pipe to make sure each pipe enabling cursor on its part of the
	 * screen.
	 */
	x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
		pipe_ctx->stream->src.width;
	y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
		pipe_ctx->stream->src.height;

	/* If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/* Adjust for ODM Combine
	 * next/prev_odm_offset is to account for scaled modes that have underscan
	 */
	if (odm_combine_on) {
		prev_odm_pipe = pipe_ctx->prev_odm_pipe;

		/* sum the widths of all ODM slices to the left of this pipe */
		while (prev_odm_pipe != NULL) {
			odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
			prev_odm_width += odm_slice_src.width;
			prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
		}

		x_pos -= (prev_odm_width);
	}

	/* If the position is negative then we need to add to the hotspot
	 * to fix cursor size between ODM slices
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* If the position on bottom MPC pipe is negative then we need to add to the hotspot and
	 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
	 */
	if (mpc_combine_on &&
		pipe_ctx->top_pipe &&
		(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {

		bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
		if (bottom_pipe_x_pos < 0) {
			x_pos = pipe_ctx->plane_res.scl_data.recout.x;
			pos_cpy.x_hotspot -= bottom_pipe_x_pos;
			if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
				adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		}
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	/* Translate into this pipe's recout space for visibility checks */
	x_pos = pos_cpy.x - param.recout.x;
	y_pos = pos_cpy.y - param.recout.y;

	recout_x_pos = x_pos - pos_cpy.x_hotspot;
	recout_y_pos = y_pos - pos_cpy.y_hotspot;

	if (recout_x_pos >= (int)param.recout.width)
		pos_cpy.enable = false;  /* not visible beyond right edge*/

	if (recout_y_pos >= (int)param.recout.height)
		pos_cpy.enable = false;  /* not visible beyond bottom edge*/

	if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
		pos_cpy.enable = false;  /* not visible beyond left edge*/

	if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
		pos_cpy.enable = false;  /* not visible beyond top edge*/

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
1210 
dcn401_check_no_memory_request_for_cab(struct dc * dc)1211 static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
1212 {
1213 	int i;
1214 
1215 	/* First, check no-memory-request case */
1216 	for (i = 0; i < dc->current_state->stream_count; i++) {
1217 		if ((dc->current_state->stream_status[i].plane_count) &&
1218 			(dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
1219 			/* Fail eligibility on a visible stream */
1220 			return false;
1221 	}
1222 
1223 	return true;
1224 }
1225 
dcn401_calculate_cab_allocation(struct dc * dc,struct dc_state * ctx)1226 static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
1227 {
1228 	int i;
1229 	uint8_t num_ways = 0;
1230 	uint32_t mall_ss_size_bytes = 0;
1231 
1232 	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
1233 	// TODO add additional logic for PSR active stream exclusion optimization
1234 	// mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
1235 
1236 	// Include cursor size for CAB allocation
1237 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1238 		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
1239 
1240 		if (!pipe->stream || !pipe->plane_state)
1241 			continue;
1242 
1243 		mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
1244 	}
1245 
1246 	// Convert number of cache lines required to number of ways
1247 	if (dc->debug.force_mall_ss_num_ways > 0)
1248 		num_ways = dc->debug.force_mall_ss_num_ways;
1249 	else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
1250 		num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
1251 	else
1252 		num_ways = 0;
1253 
1254 	return num_ways;
1255 }
1256 
/*
 * dcn401_apply_idle_power_optimizations() - Enable/disable MALL static
 * screen (CAB) idle optimization through DMUB.
 *
 * On enable: rejected when any stream uses PSR; otherwise either sends
 * NO_DCN_REQ (no visible non-PSR stream) or computes the required cache
 * ways and sends FIT/NOT_FIT accordingly. Stereo3D and TMZ surfaces
 * disqualify MALL. On disable: sends NO_IDLE_OPTIMIZATION.
 *
 * Return: true when a DMUB command was sent, false when preconditions
 * fail (no DMUB service, no current state, or PSR in use).
 */
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t ways, i;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv || !dc->current_state)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
			return false;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

	if (enable) {
		if (dcn401_check_no_memory_request_for_cab(dc)) {
			/* 1. Check no memory request case for CAB.
			 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
			 */
			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
		} else {
			/* 2. Check if all surfaces can fit in CAB.
			 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
			 * and configure HUBP's to fetch from MALL
			 */
			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);

			/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
			 * or TMZ surface, don't try to enter MALL.
			 */
			for (i = 0; i < dc->current_state->stream_count; i++) {
				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
					plane = dc->current_state->stream_status[i].plane_states[j];

					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
							plane->address.tmz_surface) {
						mall_ss_unsupported = true;
						break;
					}
				}
				if (mall_ss_unsupported)
					break;
			}
			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
				cmd.cab.cab_alloc_ways = ways;
				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
			} else {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
			}
		}
	} else {
		/* Disable CAB */
		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
		DC_LOG_MALL("idle optimization disabled\n");
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
1330 
dcn401_wait_for_dcc_meta_propagation(const struct dc * dc,const struct pipe_ctx * top_pipe)1331 void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
1332 		const struct pipe_ctx *top_pipe)
1333 {
1334 	bool is_wait_needed = false;
1335 	const struct pipe_ctx *pipe_ctx = top_pipe;
1336 
1337 	/* check if any surfaces are updating address while using flip immediate and dcc */
1338 	while (pipe_ctx != NULL) {
1339 		if (pipe_ctx->plane_state &&
1340 				pipe_ctx->plane_state->dcc.enable &&
1341 				pipe_ctx->plane_state->flip_immediate &&
1342 				pipe_ctx->plane_state->update_flags.bits.addr_update) {
1343 			is_wait_needed = true;
1344 			break;
1345 		}
1346 
1347 		/* check next pipe */
1348 		pipe_ctx = pipe_ctx->bottom_pipe;
1349 	}
1350 
1351 	if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
1352 		udelay(dc->debug.dcc_meta_propagation_delay_us);
1353 	}
1354 }
1355 
/*
 * dcn401_prepare_bandwidth() - Raise clocks and program safe watermarks
 * ahead of pipe programming.
 *
 * P-State support is temporarily forced off for the clock update (any
 * transition into P-State support must first disable MCLK switching to
 * avoid hangs) and restored at the end so the following optimize picks
 * up the real value. wm_optimized_required accumulates with |= so a
 * pending optimize is never cleared here.
 */
void dcn401_prepare_bandwidth(struct dc *dc,
	struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
	unsigned int compbuf_size = 0;

	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
	if (p_state_change_support) {
		dc->optimized_required = true;
		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	}

	/* Lift the DC-mode softmax memclk cap when the new state needs more */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

	/* Increase clocks */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* program dchubbub watermarks:
	 * For assigning wm_optimized_required, use |= operator since we don't want
	 * to clear the value if the optimize has not happened yet
	 */
	dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					false);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
	}

	/* decrease compbuf size */
	if (hubbub->funcs->program_compbuf_segments) {
		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
		dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);

		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
	}

	/* reconfigure FAMS2 under the global HW lock */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, false);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
		/* After disabling P-State, restore the original value to ensure we get the correct P-State
		 * on the next optimize. */
		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
	}
}
1413 
/*
 * dcn401_optimize_bandwidth() - Drop clocks to optimized values after
 * pipe programming completes.
 *
 * Counterpart of dcn401_prepare_bandwidth(): re-enables FAMS2, programs
 * optimized watermarks and arbiter values, restores the DC-mode softmax
 * memclk cap, grows the compbuf, and finally lowers clocks. When zstate
 * is allowed, extended blank is programmed on pipes with a fixed but
 * stretched v_total.
 */
void dcn401_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* enable fams2 if needed */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, true);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					true);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true);
	}

	/* Re-apply the DC-mode softmax cap when dramclk drops below it */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);

	/* increase compbuf size */
	if (hubbub->funcs->program_compbuf_segments)
		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			/* fixed refresh (min == max) with stretched v_total */
			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
						pipe_ctx->dlg_regs.min_dst_y_next_start);
		}
	}
}
1463 
/*
 * Acquire or release the FAMS2 global HW lock through the DMUB inbox0
 * channel. No-op unless a DMUB service exists and FAMS2 is enabled in
 * the debug config.
 */
void dcn401_fams2_global_control_lock(struct dc *dc,
		struct dc_state *context,
		bool lock)
{
	/* use always for now */
	union dmub_inbox0_cmd_lock_hw cmd = { 0 };

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
	cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
	cmd.bits.lock = lock;
	cmd.bits.should_release = !lock;

	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, cmd);
}
1480 
dcn401_fams2_global_control_lock_fast(union block_sequence_params * params)1481 void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
1482 {
1483 	struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
1484 	bool lock = params->fams2_global_control_lock_fast_params.lock;
1485 
1486 	if (params->fams2_global_control_lock_fast_params.is_required) {
1487 		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1488 
1489 		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1490 		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1491 		hw_lock_cmd.bits.lock = lock;
1492 		hw_lock_cmd.bits.should_release = !lock;
1493 		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1494 	}
1495 }
1496 
/*
 * Push the FAMS2 configuration from the BW context down to DMUB. The
 * feature is enabled only when both the caller requests it and the
 * context's global config marks FAMS2 as required.
 */
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
{
	bool required;

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;

	dc_dmub_srv_fams2_update_config(dc, context, enable && required);
}
1508 
/*
 * Reconfigure DSC when the ODM combine factor of an OTG master changes:
 * program DSC per the new stream configuration, then disconnect DSC on
 * any old OPP head that lost its DSC resource in the new context.
 */
static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	int i;
	struct pipe_ctx *old_pipe;
	struct pipe_ctx *new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;

	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
									   &dc->current_state->res_ctx,
									   old_opp_heads);
	} else {
		// DC cannot assume that the current state and the new state
		// share the same OTG pipe since this is not true when called
		// in the context of a commit stream not checked. Hence, set
		// old_otg_master to NULL to skip the DSC configuration.
		old_otg_master = NULL;
	}


	if (otg_master->stream_res.dsc)
		dcn32_update_dsc_on_stream(otg_master,
				otg_master->stream->timing.flags.DSC);
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		/* disconnect DSC from OPP heads that no longer have one */
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
						old_pipe->stream_res.dsc);
		}
	}
}
1547 
/*
 * dcn401_update_odm() - Reprogram ODM combine for an OTG master pipe.
 *
 * Collects the OPP heads belonging to the OTG master, programs ODM
 * combine (or bypass when a single OPP head remains), enables OPP
 * clocks and left-edge extra pixel, updates DSC for the new ODM
 * configuration, and reprograms the OPP blank pattern when the OTG
 * master has no DPP (since the OPP count changed).
 */
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	int opp_inst[MAX_PIPES] = {0};
	int opp_head_count;
	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
	int i;

	opp_head_count = resource_get_opp_heads_for_otg_master(
			otg_master, &context->res_ctx, opp_heads);

	for (i = 0; i < opp_head_count; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
	if (opp_head_count > 1)
		otg_master->stream_res.tg->funcs->set_odm_combine(
				otg_master->stream_res.tg,
				opp_inst, opp_head_count,
				odm_slice_width, last_odm_slice_width);
	else
		otg_master->stream_res.tg->funcs->set_odm_bypass(
				otg_master->stream_res.tg,
				&otg_master->stream->timing);

	for (i = 0; i < opp_head_count; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				opp_heads[i]->stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	update_dsc_for_odm_change(dc, context, otg_master);

	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
		/*
		 * blank pattern is generated by OPP, reprogram blank pattern
		 * due to OPP count change
		 */
		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
1592 
dcn401_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)1593 void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
1594 		struct dc_link_settings *link_settings)
1595 {
1596 	struct encoder_unblank_param params = {0};
1597 	struct dc_stream_state *stream = pipe_ctx->stream;
1598 	struct dc_link *link = stream->link;
1599 	struct dce_hwseq *hws = link->dc->hwseq;
1600 
1601 	/* calculate parameters for unblank */
1602 	params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);
1603 
1604 	params.timing = pipe_ctx->stream->timing;
1605 	params.link_settings.link_rate = link_settings->link_rate;
1606 	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;
1607 
1608 	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
1609 		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
1610 				pipe_ctx->stream_res.hpo_dp_stream_enc,
1611 				pipe_ctx->stream_res.tg->inst);
1612 	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
1613 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
1614 	}
1615 
1616 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
1617 		hws->funcs.edp_backlight_control(link, true);
1618 }
1619 
dcn401_hardware_release(struct dc * dc)1620 void dcn401_hardware_release(struct dc *dc)
1621 {
1622 	dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
1623 
1624 	/* If pstate unsupported, or still supported
1625 	 * by firmware, force it supported by dcn
1626 	 */
1627 	if (dc->current_state) {
1628 		if ((!dc->clk_mgr->clks.p_state_change_support ||
1629 				dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
1630 				dc->res_pool->hubbub->funcs->force_pstate_change_control)
1631 			dc->res_pool->hubbub->funcs->force_pstate_change_control(
1632 					dc->res_pool->hubbub, true, true);
1633 
1634 		dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1635 		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
1636 	}
1637 }
1638 
dcn401_wait_for_det_buffer_update_under_otg_master(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1639 void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
1640 {
1641 	struct pipe_ctx *opp_heads[MAX_PIPES];
1642 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
1643 	struct hubbub *hubbub = dc->res_pool->hubbub;
1644 	int dpp_count = 0;
1645 
1646 	if (!otg_master->stream)
1647 		return;
1648 
1649 	int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
1650 			&context->res_ctx, opp_heads);
1651 
1652 	for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
1653 		if (opp_heads[slice_idx]->plane_state) {
1654 			dpp_count = resource_get_dpp_pipes_for_opp_head(
1655 					opp_heads[slice_idx],
1656 					&context->res_ctx,
1657 					dpp_pipes);
1658 			for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
1659 				struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
1660 					if (dpp_pipe && hubbub &&
1661 						dpp_pipe->plane_res.hubp &&
1662 						hubbub->funcs->wait_for_det_update)
1663 						hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
1664 			}
1665 		} else {
1666 			if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
1667 				hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst);
1668 		}
1669 	}
1670 }
1671 
/* Lock or unlock all enabled, non-phantom OTG master pipes for an
 * interdependent update.
 *
 * On unlock, pipes flagged in dc->scratch.pipes_to_unlock_first are released
 * first and their old DET allocation is waited on before the remaining pipes
 * are unlocked, so freed DET segments are available to the other pipes.
 */
void dcn401_interdependent_update_lock(struct dc *dc,
		struct dc_state *context, bool lock)
{
	unsigned int i = 0;
	struct pipe_ctx *pipe = NULL;
	struct timing_generator *tg = NULL;

	if (lock) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			/* only enabled, non-phantom OTG masters take the lock */
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
				continue;
			dc->hwss.pipe_control_lock(dc, pipe, true);
		}
	} else {
		/* Need to free DET being used first and have pipe update, then unlock the remaining pipes*/
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				continue;
			}

			if (dc->scratch.pipes_to_unlock_first[i]) {
				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
				dc->hwss.pipe_control_lock(dc, pipe, false);
				/* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe*/
				dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
			}
		}

		/* Unlocking the rest of the pipes */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (dc->scratch.pipes_to_unlock_first[i])
				continue;

			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				continue;
			}

			dc->hwss.pipe_control_lock(dc, pipe, false);
		}
	}
}
1727 
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
{
	/* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
	 * HUBP will properly fetch 3DLUT contents after unlock.
	 *
	 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
	 * of whether OTG lock is currently being held or not.
	 */
	struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
	struct pipe_ctx *odm_pipe, *mpc_pipe;
	int i, wa_pipe_ct = 0;

	/* Collect every pipe in this ODM/MPC tree whose plane sources its 3DLUT
	 * from VIDMEM with shaper+3DLUT enabled — these need the workaround.
	 */
	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
		for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
			if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
						== DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
					&& mpc_pipe->plane_state->mcm_shaper_3dlut_setting
						== DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
				wa_pipes[wa_pipe_ct++] = mpc_pipe;
			}
		}
	}

	if (wa_pipe_ct > 0) {
		/* Hold VUPDATE keepout so VREADY cannot fire mid-sequence. */
		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);

		/* Assert 3DLUT FL enable before the unlock... */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
		if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
			pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);

		/* ...and re-assert it after the unlock has taken effect, in case
		 * the pending enable was cancelled by VREADY (the HW issue above).
		 */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
	} else {
		/* no affected pipes: plain unlock */
		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
	}
}
1775 
dcn401_program_outstanding_updates(struct dc * dc,struct dc_state * context)1776 void dcn401_program_outstanding_updates(struct dc *dc,
1777 		struct dc_state *context)
1778 {
1779 	struct hubbub *hubbub = dc->res_pool->hubbub;
1780 
1781 	/* update compbuf if required */
1782 	if (hubbub->funcs->program_compbuf_segments)
1783 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1784 }
1785 
/* Tear down the back end (encoder, audio, OTG, PHY clocks) for a pipe and
 * clear its pipe_ctx linkage. The caller iterates pipes such that the parent
 * (top) pipe is reset last; shared back-end resources are only disabled when
 * resetting that parent pipe.
 */
void dcn401_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		/* no stream encoder: nothing to tear down */
		pipe_ctx->stream = NULL;
		return;
	}

	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	/* free acquired resources */
	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		/* drop any pending DRR (variable refresh) adjustment */
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);

		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
		 * the case where the same symclk is shared across multiple otg
		 * instances
		 */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			link->phy_state.symclk_ref_cnts.otg = 0;
		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
			link_hwss->disable_link_output(link,
					&pipe_ctx->link_res, pipe_ctx->stream->signal);
			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
		}

		/* reset DTBCLK_P */
		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
	}

/*
 * In case of a dangling plane, setting this to NULL unconditionally
 * causes failures during reset hw ctx where, if stream is NULL,
 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
 */
	pipe_ctx->stream = NULL;
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->next_odm_pipe = NULL;
	pipe_ctx->prev_odm_pipe = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1873 
dcn401_reset_hw_ctx_wrap(struct dc * dc,struct dc_state * context)1874 void dcn401_reset_hw_ctx_wrap(
1875 		struct dc *dc,
1876 		struct dc_state *context)
1877 {
1878 	int i;
1879 	struct dce_hwseq *hws = dc->hwseq;
1880 
1881 	/* Reset Back End*/
1882 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1883 		struct pipe_ctx *pipe_ctx_old =
1884 			&dc->current_state->res_ctx.pipe_ctx[i];
1885 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1886 
1887 		if (!pipe_ctx_old->stream)
1888 			continue;
1889 
1890 		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
1891 			continue;
1892 
1893 		if (!pipe_ctx->stream ||
1894 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1895 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1896 
1897 			if (hws->funcs.reset_back_end_for_pipe)
1898 				hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1899 			if (hws->funcs.enable_stream_gating)
1900 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1901 			if (old_clk)
1902 				old_clk->funcs->cs_power_down(old_clk);
1903 		}
1904 	}
1905 }
1906 
dcn401_calculate_vready_offset_for_group(struct pipe_ctx * pipe)1907 static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
1908 {
1909 	struct pipe_ctx *other_pipe;
1910 	unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
1911 
1912 	/* Always use the largest vready_offset of all connected pipes */
1913 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
1914 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1915 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1916 	}
1917 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
1918 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1919 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1920 	}
1921 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
1922 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1923 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1924 	}
1925 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
1926 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1927 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1928 	}
1929 
1930 	return vready_offset;
1931 }
1932 
dcn401_program_tg(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context,struct dce_hwseq * hws)1933 static void dcn401_program_tg(
1934 	struct dc *dc,
1935 	struct pipe_ctx *pipe_ctx,
1936 	struct dc_state *context,
1937 	struct dce_hwseq *hws)
1938 {
1939 	pipe_ctx->stream_res.tg->funcs->program_global_sync(
1940 		pipe_ctx->stream_res.tg,
1941 		dcn401_calculate_vready_offset_for_group(pipe_ctx),
1942 		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
1943 		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
1944 		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
1945 		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
1946 
1947 	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
1948 		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
1949 
1950 	pipe_ctx->stream_res.tg->funcs->set_vtg_params(
1951 		pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
1952 
1953 	if (hws->funcs.setup_vupdate_interrupt)
1954 		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
1955 }
1956 
/* Program a single pipe according to its update flags: blanking, timing
 * generator, ODM, plane enable, DET allocation, HUBP/DPP, color transforms,
 * OPP format, ABM and test pattern. Which steps run is driven entirely by
 * the pipe/plane/stream update_flags set by the detection pass.
 */
void dcn401_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only need to unblank on top pipe */
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
		if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.odm ||
			pipe_ctx->stream->update_flags.bits.abm_level)
			hws->funcs.blank_pixel_data(dc, pipe_ctx,
				!pipe_ctx->plane_state ||
				!pipe_ctx->plane_state->visible);
	}

	/* Only update TG on top pipe */
	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
		&& !pipe_ctx->prev_odm_pipe)
		dcn401_program_tg(dc, pipe_ctx, context, hws);

	if (pipe_ctx->update_flags.bits.odm)
		hws->funcs.update_odm(dc, context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable) {
		if (hws->funcs.enable_plane)
			hws->funcs.enable_plane(dc, pipe_ctx, context);
		else
			dc->hwss.enable_plane(dc, pipe_ctx, context);

		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
	}

	/* reprogram DET allocation for this pipe's HUBP */
	if (pipe_ctx->update_flags.bits.det_size) {
		if (dc->res_pool->hubbub->funcs->program_det_size)
			dc->res_pool->hubbub->funcs->program_det_size(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
		if (dc->res_pool->hubbub->funcs->program_det_segments)
			dc->res_pool->hubbub->funcs->program_det_segments(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
	}

	/* any pipe/plane/stream change requires HUBP/DPP reprogramming */
	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
	    pipe_ctx->plane_state->update_flags.raw ||
	    pipe_ctx->stream->update_flags.raw))
		dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);

	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
		pipe_ctx->plane_state->update_flags.bits.hdr_mult))
		hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state &&
		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
			pipe_ctx->update_flags.bits.enable))
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable ||
		pipe_ctx->update_flags.bits.plane_changed ||
		pipe_ctx->stream->update_flags.bits.out_tf ||
		(pipe_ctx->plane_state &&
			pipe_ctx->plane_state->update_flags.bits.output_tf_change))
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interation between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
		|| pipe_ctx->update_flags.bits.opp_changed) {

		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
			pipe_ctx->stream_res.opp,
			COLOR_SPACE_YCBCR601,
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->signal);

		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&pipe_ctx->stream->bit_depth_params,
			&pipe_ctx->stream->clamping);
	}

	/* Set ABM pipe after other pipe configurations done */
	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
		if (pipe_ctx->stream_res.abm) {
			dc->hwss.set_pipe(pipe_ctx);
			pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
				pipe_ctx->stream->abm_level);
		}
	}

	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
		struct bit_depth_reduction_params params;

		/* bit depth reduction is zeroed while the test pattern is active */
		memset(&params, 0, sizeof(params));
		odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
		dc->hwss.set_disp_pattern_generator(dc,
			pipe_ctx,
			pipe_ctx->stream_res.test_pattern_params.test_pattern,
			pipe_ctx->stream_res.test_pattern_params.color_space,
			pipe_ctx->stream_res.test_pattern_params.color_depth,
			NULL,
			pipe_ctx->stream_res.test_pattern_params.width,
			pipe_ctx->stream_res.test_pattern_params.height,
			pipe_ctx->stream_res.test_pattern_params.offset);
	}
}
2074 
/* Program all front-end (plane-side) hardware for the transition from
 * dc->current_state to @context. Phases, in order: disable triple buffering
 * for the full update, detect per-pipe changes, turn phantom OTGs back on for
 * double-buffered disables, blank OTGs being disabled, disconnect MPCCs and
 * free their DET, update ODM on blanked OTG masters, then program every
 * updated pipe top-down so MPC trees are built in the right order.
 */
void dcn401_program_front_end_for_ctx(
	struct dc *dc,
	struct dc_state *context)
{
	int i;
	unsigned int prev_hubp_count = 0;
	unsigned int hubp_count = 0;
	struct dce_hwseq *hws = dc->hwseq;
	struct pipe_ctx *pipe = NULL;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (resource_is_pipe_topology_changed(dc->current_state, context))
		resource_log_pipe_topology_update(dc, context);

	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];

			if (pipe->plane_state) {
				/* a full update should never arrive with flips pending */
				if (pipe->plane_state->triplebuffer_flips)
					BREAK_TO_DEBUGGER();

				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe, pipe->plane_state->triplebuffer_flips);
			}
		}
	}

	/* count planes in the old and new states */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			prev_hubp_count++;
		if (context->res_ctx.pipe_ctx[i].plane_state)
			hubp_count++;
	}

	/* going from zero planes to some: force P-State allow while HUBPs come up */
	if (prev_hubp_count == 0 && hubp_count > 0) {
		if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
			dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, true, false);
		udelay(500);
	}

	/* Set pipe update flags and lock pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
			&context->res_ctx.pipe_ctx[i]);

	/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
	 * buffer updates properly)
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;

		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
			dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;

			if (tg->funcs->enable_crtc) {
				if (dc->hwseq->funcs.blank_pixel_data)
					dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);

				tg->funcs->enable_crtc(tg);
			}
		}
	}
	/* OTG blank before disabling all front ends */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
			&& !context->res_ctx.pipe_ctx[i].top_pipe
			&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
			&& context->res_ctx.pipe_ctx[i].stream)
			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);


	/* Disconnect mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
			|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
			 * then we want to do the programming here (effectively it's being disabled). If we do
			 * the programming later the DET won't be updated until the OTG for the phantom pipe is
			 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
			 * DET allocation.
			 */
			if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
				(context->res_ctx.pipe_ctx[i].plane_state &&
				dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
				SUBVP_PHANTOM))) {
				if (hubbub->funcs->program_det_size)
					hubbub->funcs->program_det_size(hubbub,
						dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
				if (dc->res_pool->hubbub->funcs->program_det_segments)
					dc->res_pool->hubbub->funcs->program_det_segments(
						hubbub,	dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
			}
			hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
				&dc->current_state->res_ctx.pipe_ctx[i]);
			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
		}

	/* update ODM for blanked OTG master pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
			!resource_is_pipe_type(pipe, DPP_PIPE) &&
			pipe->update_flags.bits.odm &&
			hws->funcs.update_odm)
			hws->funcs.update_odm(dc, context, pipe);
	}

	/*
	 * Program all updated pipes, order matters for mpcc setup. Start with
	 * top pipe and program all pipes that follow in order
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			while (pipe) {
				if (hws->funcs.program_pipe)
					hws->funcs.program_pipe(dc, pipe, context);
				else {
					/* Don't program phantom pipes in the regular front end programming sequence.
					 * There is an MPO transition case where a pipe being used by a video plane is
					 * transitioned directly to be a phantom pipe when closing the MPO video.
					 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
					 * right away) but the MPO still exists until the double buffered update of the
					 * main pipe so we will get a frame of underflow if the phantom pipe is
					 * programmed here.
					 */
					if (pipe->stream &&
						dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
						dcn401_program_pipe(dc, pipe, context);
				}

				pipe = pipe->bottom_pipe;
			}
		}

		/* Program secondary blending tree and writeback pipes */
		pipe = &context->res_ctx.pipe_ctx[i];
		if (!pipe->top_pipe && !pipe->prev_odm_pipe
			&& pipe->stream && pipe->stream->num_wb_info > 0
			&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
				|| pipe->stream->update_flags.raw)
			&& hws->funcs.program_all_writeback_pipes_in_tree)
			hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);

		/* Avoid underflow by check of pipe line read when adding 2nd plane. */
		if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
			!pipe->top_pipe &&
			pipe->stream &&
			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
			dc->current_state->stream_status[0].plane_count == 1 &&
			context->stream_status[0].plane_count > 1) {
			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
		}
	}
}
2240 
dcn401_post_unlock_program_front_end(struct dc * dc,struct dc_state * context)2241 void dcn401_post_unlock_program_front_end(
2242 	struct dc *dc,
2243 	struct dc_state *context)
2244 {
2245 	// Timeout for pipe enable
2246 	unsigned int timeout_us = 100000;
2247 	unsigned int polling_interval_us = 1;
2248 	struct dce_hwseq *hwseq = dc->hwseq;
2249 	int i;
2250 
2251 	DC_LOGGER_INIT(dc->ctx->logger);
2252 
2253 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2254 		if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
2255 			!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
2256 			dc->hwss.post_unlock_reset_opp(dc,
2257 				&dc->current_state->res_ctx.pipe_ctx[i]);
2258 
2259 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2260 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2261 			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
2262 
2263 	/*
2264 	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
2265 	 * part of the enable operation otherwise, DM may request an immediate flip which
2266 	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
2267 	 * is unsupported on DCN.
2268 	 */
2269 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2270 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2271 		// Don't check flip pending on phantom pipes
2272 		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
2273 			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
2274 			struct hubp *hubp = pipe->plane_res.hubp;
2275 			int j = 0;
2276 
2277 			for (j = 0; j < timeout_us / polling_interval_us
2278 				&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
2279 				udelay(polling_interval_us);
2280 		}
2281 	}
2282 
2283 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2284 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2285 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2286 
2287 		/* When going from a smaller ODM slice count to larger, we must ensure double
2288 		 * buffer update completes before we return to ensure we don't reduce DISPCLK
2289 		 * before we've transitioned to 2:1 or 4:1
2290 		 */
2291 		if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
2292 			resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
2293 			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
2294 			int j = 0;
2295 			struct timing_generator *tg = pipe->stream_res.tg;
2296 
2297 			if (tg->funcs->get_optc_double_buffer_pending) {
2298 				for (j = 0; j < timeout_us / polling_interval_us
2299 					&& tg->funcs->get_optc_double_buffer_pending(tg); j++)
2300 					udelay(polling_interval_us);
2301 			}
2302 		}
2303 	}
2304 
2305 	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
2306 		dc->res_pool->hubbub->funcs->force_pstate_change_control(
2307 			dc->res_pool->hubbub, false, false);
2308 
2309 
2310 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2311 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2312 
2313 		if (pipe->plane_state && !pipe->top_pipe) {
2314 			/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
2315 			 * case (if a pipe being used for a video plane transitions to a phantom pipe, it
2316 			 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
2317 			 * programming sequence).
2318 			 */
2319 			while (pipe) {
2320 				if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
2321 					/* When turning on the phantom pipe we want to run through the
2322 					 * entire enable sequence, so apply all the "enable" flags.
2323 					 */
2324 					if (dc->hwss.apply_update_flags_for_phantom)
2325 						dc->hwss.apply_update_flags_for_phantom(pipe);
2326 					if (dc->hwss.update_phantom_vp_position)
2327 						dc->hwss.update_phantom_vp_position(dc, context, pipe);
2328 					dcn401_program_pipe(dc, pipe, context);
2329 				}
2330 				pipe = pipe->bottom_pipe;
2331 			}
2332 		}
2333 	}
2334 
2335 	if (!hwseq)
2336 		return;
2337 
2338 	/* P-State support transitions:
2339 	 * Natural -> FPO:      P-State disabled in prepare, force disallow anytime is safe
2340 	 * FPO -> Natural:      Unforce anytime after FW disable is safe (P-State will assert naturally)
2341 	 * Unsupported -> FPO:  P-State enabled in optimize, force disallow anytime is safe
2342 	 * FPO -> Unsupported:  P-State disabled in prepare, unforce disallow anytime is safe
2343 	 * FPO <-> SubVP:       Force disallow is maintained on the FPO / SubVP pipes
2344 	 */
2345 	if (hwseq->funcs.update_force_pstate)
2346 		dc->hwseq->funcs.update_force_pstate(dc, context);
2347 
2348 	/* Only program the MALL registers after all the main and phantom pipes
2349 	 * are done programming.
2350 	 */
2351 	if (hwseq->funcs.program_mall_pipe_config)
2352 		hwseq->funcs.program_mall_pipe_config(dc, context);
2353 
2354 	/* WA to apply WM setting*/
2355 	if (hwseq->wa.DEGVIDCN21)
2356 		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
2357 
2358 
2359 	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
2360 	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
2361 
2362 		if (dc->current_state->stream_status[0].plane_count == 1 &&
2363 			context->stream_status[0].plane_count > 1) {
2364 
2365 			struct timing_generator *tg = dc->res_pool->timing_generators[0];
2366 
2367 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
2368 
2369 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
2370 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
2371 				tg->funcs->get_frame_count(tg);
2372 		}
2373 	}
2374 }
2375 
dcn401_update_bandwidth(struct dc * dc,struct dc_state * context)2376 bool dcn401_update_bandwidth(
2377 	struct dc *dc,
2378 	struct dc_state *context)
2379 {
2380 	int i;
2381 	struct dce_hwseq *hws = dc->hwseq;
2382 
2383 	/* recalculate DML parameters */
2384 	if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
2385 		return false;
2386 
2387 	/* apply updated bandwidth parameters */
2388 	dc->hwss.prepare_bandwidth(dc, context);
2389 
2390 	/* update hubp configs for all pipes */
2391 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2392 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2393 
2394 		if (pipe_ctx->plane_state == NULL)
2395 			continue;
2396 
2397 		if (pipe_ctx->top_pipe == NULL) {
2398 			bool blank = !is_pipe_tree_visible(pipe_ctx);
2399 
2400 			pipe_ctx->stream_res.tg->funcs->program_global_sync(
2401 				pipe_ctx->stream_res.tg,
2402 				dcn401_calculate_vready_offset_for_group(pipe_ctx),
2403 				(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
2404 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
2405 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
2406 				(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
2407 
2408 			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2409 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
2410 
2411 			if (pipe_ctx->prev_odm_pipe == NULL)
2412 				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2413 
2414 			if (hws->funcs.setup_vupdate_interrupt)
2415 				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2416 		}
2417 
2418 		if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
2419 			pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
2420 				pipe_ctx->plane_res.hubp,
2421 				&pipe_ctx->hubp_regs,
2422 				&pipe_ctx->global_sync,
2423 				&pipe_ctx->stream->timing);
2424 	}
2425 
2426 	return true;
2427 }
2428 
/**
 * dcn401_detect_pipe_changes - diff a pipe's old vs. new state and populate
 * new_pipe->update_flags accordingly.
 *
 * @old_state: currently-applied dc_state (used for phantom-pipe lookup)
 * @new_state: dc_state being applied (used for phantom-pipe lookup)
 * @old_pipe:  pipe_ctx from @old_state at the same pipe index
 * @new_pipe:  pipe_ctx from @new_state; its update_flags are cleared and
 *             then set based on what differs from @old_pipe
 *
 * May return early: once 'disable' (or full 'enable') is decided, the
 * finer-grained detection below is skipped.
 */
void dcn401_detect_pipe_changes(struct dc_state *old_state,
	struct dc_state *new_state,
	struct pipe_ctx *old_pipe,
	struct pipe_ctx *new_pipe)
{
	bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
	bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;

	/* Snapshot DCN4x global-sync parameters for the comparison below. */
	unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
	unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;

	/* Start from a clean slate; every flag below is set by detection only. */
	new_pipe->update_flags.raw = 0;

	/* If non-phantom pipe is being transitioned to a phantom pipe,
	 * set disable and return immediately. This is because the pipe
	 * that was previously in use must be fully disabled before we
	 * can "enable" it as a phantom pipe (since the OTG will certainly
	 * be different). The post_unlock sequence will set the correct
	 * update flags to enable the phantom pipe.
	 */
	if (old_pipe->plane_state && !old_is_phantom &&
		new_pipe->plane_state && new_is_phantom) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
		resource_is_odm_topology_changed(new_pipe, old_pipe))
		/* Detect odm changes */
		new_pipe->update_flags.bits.odm = 1;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		/* Newly enabled pipe: request full programming of every subunit. */
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.unbounded_req = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		new_pipe->update_flags.bits.det_size = 1;
		if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
			new_pipe->stream_res.test_pattern_params.width != 0 &&
			new_pipe->stream_res.test_pattern_params.height != 0)
			new_pipe->update_flags.bits.test_pattern_changed = 1;
		/* Tree/ODM heads additionally own ODM and global-sync programming. */
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}

	/* For SubVP we need to unconditionally enable because any phantom pipes are
	 * always removed then newly added for every full updates whenever SubVP is in use.
	 * The remove-add sequence of the phantom pipe always results in the pipe
	 * being blanked in enable_stream_timing (DPG).
	 */
	if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
		new_pipe->update_flags.bits.enable = 1;

	/* Phantom pipes are effectively disabled, if the pipe was previously phantom
	 * we have to enable
	 */
	if (old_pipe->plane_state && old_is_phantom &&
		new_pipe->plane_state && !new_is_phantom)
		new_pipe->update_flags.bits.enable = 1;

	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect plane change */
	if (old_pipe->plane_state != new_pipe->plane_state)
		new_pipe->update_flags.bits.plane_changed = true;

	/* Detect top pipe only changes */
	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
		/* Detect global sync changes */
		if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
			|| (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
			|| (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
			|| (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
			new_pipe->update_flags.bits.global_sync = 1;
	}

	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
		new_pipe->update_flags.bits.det_size = 1;

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/*
	 * Detect mpcc blending changes, only dpp inst and opp matter here,
	 * mpccs getting removed/inserted update connected ones during their own
	 * programming
	 */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
		|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
		|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
			&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		/* NOTE: old_*_regs are by-value copies. After the interdependent
		 * fields are found to differ, they are overwritten with the new
		 * values so the trailing memcmp only flags *other* dlg/ttu/rq
		 * differences as hubp_rq_dlg_ttu.
		 */
		struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs	 old_rq_regs = old_pipe->hubp_regs.rq_regs;
		struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs	 *new_rq_regs = &new_pipe->hubp_regs.rq_regs;

		/* Detect pipe interdependent updates */
		if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
			|| (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
			|| (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
			|| (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
			|| (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
			|| (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
			|| (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
			|| (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
			|| (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
			|| (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
				new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
			|| (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
			|| (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
			/* Sync the interdependent fields into the local copies so the
			 * memcmp below does not re-report them.
			 */
			old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
			old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
			old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
			old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
			old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
			old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
			old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
			old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
			old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
			old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
			old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
			memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
			memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}

	if (old_pipe->unbounded_req != new_pipe->unbounded_req)
		new_pipe->update_flags.bits.unbounded_req = 1;

	if (memcmp(&old_pipe->stream_res.test_pattern_params,
		&new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
		new_pipe->update_flags.bits.test_pattern_changed = 1;
	}
}
2625 
dcn401_plane_atomic_power_down(struct dc * dc,struct dpp * dpp,struct hubp * hubp)2626 void dcn401_plane_atomic_power_down(struct dc *dc,
2627 		struct dpp *dpp,
2628 		struct hubp *hubp)
2629 {
2630 	struct dce_hwseq *hws = dc->hwseq;
2631 	uint32_t org_ip_request_cntl = 0;
2632 
2633 	DC_LOGGER_INIT(dc->ctx->logger);
2634 
2635 	if (REG(DC_IP_REQUEST_CNTL)) {
2636 		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
2637 		if (org_ip_request_cntl == 0)
2638 			REG_SET(DC_IP_REQUEST_CNTL, 0,
2639 				IP_REQUEST_EN, 1);
2640 	}
2641 
2642 	if (hws->funcs.dpp_pg_control)
2643 		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
2644 
2645 	if (hws->funcs.hubp_pg_control)
2646 		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
2647 
2648 	hubp->funcs->hubp_reset(hubp);
2649 	dpp->funcs->dpp_reset(dpp);
2650 
2651 	if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
2652 		REG_SET(DC_IP_REQUEST_CNTL, 0,
2653 			IP_REQUEST_EN, 0);
2654 
2655 	DC_LOG_DEBUG(
2656 			"Power gated front end %d\n", hubp->inst);
2657 
2658 	if (hws->funcs.dpp_root_clock_control)
2659 		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
2660 }
2661