1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: AMD
24  *
25  */
26 
27 #include "dm_services.h"
28 #include "dm_helpers.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "dccg.h"
32 #include "dce/dce_hwseq.h"
33 #include "clk_mgr.h"
34 #include "reg_helper.h"
35 #include "abm.h"
36 #include "hubp.h"
37 #include "dchubbub.h"
38 #include "timing_generator.h"
39 #include "opp.h"
40 #include "ipp.h"
41 #include "mpc.h"
42 #include "mcif_wb.h"
43 #include "dc_dmub_srv.h"
44 #include "dcn35_hwseq.h"
45 #include "dcn35/dcn35_dccg.h"
46 #include "link_hwss.h"
47 #include "dpcd_defs.h"
48 #include "dce/dmub_outbox.h"
49 #include "link.h"
50 #include "dcn10/dcn10_hwseq.h"
51 #include "inc/link_enc_cfg.h"
52 #include "dcn30/dcn30_vpg.h"
53 #include "dce/dce_i2c_hw.h"
54 #include "dsc.h"
55 #include "dcn20/dcn20_optc.h"
56 #include "dcn30/dcn30_cm_common.h"
57 #include "dcn31/dcn31_hwseq.h"
58 #include "dcn20/dcn20_hwseq.h"
59 #include "dc_state_priv.h"
60 
61 #define DC_LOGGER_INIT(logger) \
62 	struct dal_logger *dc_logger = logger
63 
64 #define CTX \
65 	hws->ctx
66 #define REG(reg)\
67 	hws->regs->reg
68 #define DC_LOGGER \
69 	dc_logger
70 
71 
72 #undef FN
73 #define FN(reg_name, field_name) \
74 	hws->shifts->field_name, hws->masks->field_name
#if 0
/*
 * enable_memory_low_power() - program low-power states for DCN memories.
 *
 * Compiled out (#if 0) but kept for reference: forces DMCU ERAM to shutdown
 * when DMCU is unused, sets OPTC/ODM memory power modes, powers down VGA
 * memory, and powers down MPC/VPG memories, each gated by the corresponding
 * enable_mem_low_power debug bit.
 */
static void enable_memory_low_power(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;

	if (dc->debug.enable_mem_low_power.bits.dmcu) {
		// Force ERAM to shutdown if DMCU is not enabled
		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
		}
	}
	/* DCN3.5 has MEM_PWR enabled by default; make sure the memories wake up */
	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.mpc &&
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);

	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
		// Power down VPGs
		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_DP2_0)
		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
	}

}
#endif
115 
/*
 * dcn35_set_dmu_fgcg() - toggle fine-grain clock gating in the DMU block.
 *
 * The *_FGCG_REP_DIS register fields are "disable" bits, so each is
 * programmed with the inverse of @enable.
 */
void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE_3(DMU_CLK_CNTL,
		RBBMIF_FGCG_REP_DIS, !enable,
		IHC_FGCG_REP_DIS, !enable,
		LONO_FGCG_REP_DIS, !enable
	);
}
124 
/*
 * dcn35_setup_hpo_hw_control() - enable or disable the HPO (high pixel-rate
 * output) IO via HPO_TOP_HW_CONTROL.  The !! normalizes @enable to 0/1 for
 * the single-bit register field.
 */
void dcn35_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}
129 
/*
 * dcn35_init_hw() - one-time hardware bring-up for the DCN3.5 display core.
 *
 * Called at driver load / resume.  Order matters throughout: clocks and
 * DCCG come up first, reference clocks are derived from VBIOS firmware
 * info, link encoders are initialized and active DIGs detected, all DP
 * displays are blanked before detection, pipes are powered down when
 * taking over from VBIOS (unless seamless boot), and finally clock
 * gating / memory power / watermarks / DMCUB capabilities are programmed.
 */
void dcn35_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int i;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	//dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu);

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		/* this calls into dmub fw to do the init */
		hws->funcs.bios_golden_init(dc);
	}

	/* Disable clock gating while we program the hardware; selectively
	 * re-enabled further down once init is complete.
	 */
	if (!dc->debug.disable_clock_gate) {
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
		REG_WRITE(DCCG_GATE_DISABLE_CNTL2,  0);

		/* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
		REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
				PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
				PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
				PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
				PHYESYMCLK_ROOT_GATE_DISABLE, 1);

		REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4,
				DPIASYMCLK0_GATE_DISABLE, 0,
				DPIASYMCLK1_GATE_DISABLE, 0,
				DPIASYMCLK2_GATE_DISABLE, 0,
				DPIASYMCLK3_GATE_DISABLE, 0);

		/* Disable all CNTL5 gates first, then allow DTBCLK/DPSTREAMCLK gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF);
		REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
				DTBCLK_P0_GATE_DISABLE, 0,
				DTBCLK_P1_GATE_DISABLE, 0,
				DTBCLK_P2_GATE_DISABLE, 0,
				DTBCLK_P3_GATE_DISABLE, 0);
		REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
				DPSTREAMCLK0_GATE_DISABLE, 0,
				DPSTREAMCLK1_GATE_DISABLE, 0,
				DPSTREAMCLK2_GATE_DISABLE, 0,
				DPSTREAMCLK3_GATE_DISABLE, 0);

	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	//enable_memory_low_power(dc);

	/* Derive DCCG and DCHUB reference clocks from the VBIOS crystal
	 * frequency; fall back to xtalin directly when the DCCG/HUBBUB sw
	 * components are absent.
	 */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
				&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
				res_pool->ref_clocks.dccg_ref_clock_inKhz,
				&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
				res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
				res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);
/*
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
*/
	if (res_pool->hubbub->funcs->dchubbub_init)
		res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {

		// we want to turn off edp displays if odm is enabled and no seamless boot
		if (!dc->caps.seamless_odm) {
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				struct timing_generator *tg = dc->res_pool->timing_generators[i];
				uint32_t num_opps, opp_id_src0, opp_id_src1;

				num_opps = 1;
				if (tg) {
					if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
						tg->funcs->get_optc_source(tg, &num_opps,
								&opp_id_src0, &opp_id_src1);
					}
				}

				/* num_opps > 1 means VBIOS left ODM combine active */
				if (num_opps > 1) {
					dc->link_srv->blank_all_edp_displays(dc);
					break;
				}
			}
		}

		hws->funcs.init_pipes(dc, dc->current_state);
		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Capture the backlight level VBIOS left programmed so ABM init can
	 * preserve it.
	 */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}
	/* ABM is serviced by DMCUB; only init when the dmub service exists */
	if (dc->ctx->dmub_srv) {
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
		}
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	// Set i2c to light sleep until engine is setup
	if (dc->debug.enable_mem_low_power.bits.i2c)
		REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0);

	if (hws->funcs.setup_hpo_hw_control)
		hws->funcs.setup_hpo_hw_control(hws, false);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0,
				SYMCLKB_FE_GATE_DISABLE, 0,
				SYMCLKC_FE_GATE_DISABLE, 0,
				SYMCLKD_FE_GATE_DISABLE, 0,
				SYMCLKE_FE_GATE_DISABLE, 0);
		REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0);
		REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0,
				SYMCLKB_GATE_DISABLE, 0,
				SYMCLKC_GATE_DISABLE, 0,
				SYMCLKD_GATE_DISABLE, 0,
				SYMCLKE_GATE_DISABLE, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->debug.disable_mem_low_power) {
		REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
	}
	/* Only program watermarks when VBIOS was not already driving displays */
	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);



	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);
	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
	}

	if (dc->res_pool->pg_cntl) {
		if (dc->res_pool->pg_cntl->funcs->init_pg_status)
			dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
	}
}
360 
calc_mpc_flow_ctrl_cnt(const struct dc_stream_state * stream,int opp_cnt)361 static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
362 		int opp_cnt)
363 {
364 	bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
365 	int flow_ctrl_cnt;
366 
367 	if (opp_cnt >= 2)
368 		hblank_halved = true;
369 
370 	flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
371 			stream->timing.h_border_left -
372 			stream->timing.h_border_right;
373 
374 	if (hblank_halved)
375 		flow_ctrl_cnt /= 2;
376 
377 	/* ODM combine 4:1 case */
378 	if (opp_cnt == 4)
379 		flow_ctrl_cnt /= 2;
380 
381 	return flow_ctrl_cnt;
382 }
383 
/*
 * update_dsc_on_stream() - enable or disable DSC across an ODM pipe chain.
 *
 * On enable: builds one dsc_config (per-OPP picture width and slice count),
 * programs and enables the DSC block of this pipe and of every ODM sibling,
 * then enables DSC mode in the OPTC.  On disable: disables DSC in the OPTC
 * first, then disables every DSC block in the chain.
 */
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;

	DC_LOGGER_INIT(stream->ctx->logger);

	ASSERT(dsc);
	/* Count the OPPs involved in ODM combine for this stream */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	if (enable) {
		struct dsc_config dsc_cfg;
		struct dsc_optc_config dsc_optc_cfg;
		enum optc_dsc_mode optc_dsc_mode;

		/* Enable DSC hw block */
		/* Each DSC instance compresses 1/opp_cnt of the picture width */
		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		/* Slices must split evenly across the ODM segments */
		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

			ASSERT(odm_dsc);
			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
		}
		/* Restore full-picture values (dsc_cfg was per-segment above) */
		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
		dsc_cfg.pic_width *= opp_cnt;

		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;

		/* Enable DSC in OPTC */
		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
							optc_dsc_mode,
							dsc_optc_cfg.bytes_per_pixel,
							dsc_optc_cfg.slice_width);
	} else {
		/* disable DSC in OPTC */
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
				pipe_ctx->stream_res.tg,
				OPTC_DSC_DISABLED, 0, 0);

		/* disable DSC block */
		dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			ASSERT(odm_pipe->stream_res.dsc);
			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
		}
	}
}
446 
447 // Given any pipe_ctx, return the total ODM combine factor, and optionally return
448 // the OPPids which are used
get_odm_config(struct pipe_ctx * pipe_ctx,unsigned int * opp_instances)449 static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances)
450 {
451 	unsigned int opp_count = 1;
452 	struct pipe_ctx *odm_pipe;
453 
454 	// First get to the top pipe
455 	for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe)
456 		;
457 
458 	// First pipe is always used
459 	if (opp_instances)
460 		opp_instances[0] = odm_pipe->stream_res.opp->inst;
461 
462 	// Find and count odm pipes, if any
463 	for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
464 		if (opp_instances)
465 			opp_instances[opp_count] = odm_pipe->stream_res.opp->inst;
466 		opp_count++;
467 	}
468 
469 	return opp_count;
470 }
471 
/*
 * dcn35_update_odm() - reprogram ODM combine for a pipe's current state.
 *
 * Configures OPTC ODM combine (or bypass), MPC output flow control for
 * every participating OPP, OPP pipe clocks for the ODM siblings, and
 * updates/disconnects DSC to match the new ODM topology.
 */
void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 0;
	int opp_inst[MAX_PIPES] = {0};
	/* 2x pixel rate clocking when interlaced or two pixels per container */
	bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
	struct mpc_dwb_flow_control flow_control;
	struct mpc *mpc = dc->res_pool->mpc;
	int i;

	opp_cnt = get_odm_config(pipe_ctx, opp_inst);

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				&pipe_ctx->stream->timing);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

	/* ODM combine also implies 2x pixel rate clocking */
	rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
	flow_control.flow_ctrl_mode = 0;
	flow_control.flow_ctrl_cnt0 = 0x80;
	flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
	if (mpc->funcs->set_out_rate_control) {
		for (i = 0; i < opp_cnt; ++i) {
			mpc->funcs->set_out_rate_control(
					mpc, opp_inst[i],
					true,
					rate_control_2x_pclk,
					&flow_control);
		}
	}

	/* Make sure OPP pipe clocks run for every ODM sibling */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
				odm_pipe->stream_res.opp,
				true);
	}

	if (pipe_ctx->stream_res.dsc) {
		struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];

		update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);

		/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
		if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
				current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
			struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
			/* disconnect DSC block from stream */
			dsc->funcs->dsc_disconnect(dsc);
		}
	}
}
527 
/*
 * dcn35_dpp_root_clock_control() - gate or ungate the root clock of one DPP.
 *
 * No-op unless the dpp root-clock optimization debug bit is set and the
 * DCCG implements the hook.
 */
void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
{
	struct dc *dc = hws->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;

	if (!dc->debug.root_clock_optimization.bits.dpp)
		return;

	if (dccg->funcs->dpp_root_clock_control)
		dccg->funcs->dpp_root_clock_control(dccg, dpp_inst, clock_on);
}
538 
/*
 * dcn35_dsc_pg_control() - power gate or ungate a single DSC instance.
 *
 * DSC0..3 map to power domains 16..19.  IP_REQUEST_EN is raised for the
 * duration of the update (and restored afterwards if this function turned
 * it on).  Each update is confirmed by polling DOMAIN_PGFSM_PWR_STATUS:
 * 0 = powered on, 2 = power gated.
 */
void dcn35_dsc_pg_control(
		struct dce_hwseq *hws,
		unsigned int dsc_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_dsc_power_gate)
		return;
	if (hws->ctx->dc->debug.ignore_pg)
		return;
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	/* restore IP_REQUEST_EN only if we raised it above */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
597 
/*
 * dcn35_enable_power_gating_plane() - configure force-on state of the
 * plane-related power domains.
 *
 * HUBP (domains 0/2) and DPP (domains 1/3) are always forced on here.
 * DSC domains (16-19) are allowed to gate only when @enable is set and
 * DSC power gating is not disabled via debug option.
 *
 * NOTE(review): IP_REQUEST_EN is raised but never restored on exit,
 * unlike dcn35_dsc_pg_control() — confirm this is intentional.
 */
void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
{
	bool force_on = true; /* disable power gating */
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (hws->ctx->dc->debug.ignore_pg)
		return;
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
	/* DCHUBP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	/* DPP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);

	/* DSC domains may gate when enabled and not debug-disabled */
	force_on = true; /* disable power gating */
	if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
		force_on = false;

	/* DCS0/1/2/3/4 */
	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);


}
629 
/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn35_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	/* Preferred path: full eDP shutdown (backlight off, power down HW,
	 * then eDP panel power off) when all required hooks exist.
	 */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		/* Otherwise power down once if any link has an active DIG */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);
				break;
			}

		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);

	if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER)
		dc_allow_idle_optimizations(dc, true);
}
679 
/*
 * dcn35_apply_idle_power_optimizations() - request or release idle power
 * optimizations via DMCUB.
 *
 * When enabling, the request is refused (returns false) unless exactly one
 * eDP link exists and every powered-on stream is an embedded signal.
 * Returns true once the request has been forwarded (or immediately under
 * DMCUB emulation).
 */
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	int edp_num;
	int i;

	if (dc->debug.dmcub_emulation)
		return true;

	if (enable) {
		dc_get_edp_links(dc, edp_links, &edp_num);
		/* require exactly one eDP panel */
		if (edp_num == 0 || edp_num > 1)
			return false;

		/* every active stream must be an embedded display */
		for (i = 0; i < dc->current_state->stream_count; ++i) {
			const struct dc_stream_state *stream =
					dc->current_state->streams[i];

			if (!stream->dpms_off &&
					!dc_is_embedded_signal(stream->signal))
				return false;
		}
	}

	// TODO: review other cases when idle optimization is allowed
	dc_dmub_srv_apply_idle_power_optimizations(dc, enable);

	return true;
}
705 
/*
 * dcn35_z10_restore() - restore display hardware after Z10 power state.
 *
 * Exits idle power optimizations first, then runs the common dcn31
 * restore sequence.  NOTE(review): presumably the exit ensures DMCUB is
 * out of idle before the restore commands are issued — confirm.
 */
void dcn35_z10_restore(const struct dc *dc)
{
	if (dc->debug.disable_z10)
		return;

	dc_dmub_srv_apply_idle_power_optimizations(dc, false);

	dcn31_z10_restore(dc);
}
715 
/*
 * dcn35_init_pipes() - reset all display pipes to a known state at init.
 *
 * Blanks enabled OTGs, clears DET allocations, resets MPC muxes, tears
 * down pipe_ctx <-> hardware mappings (unless the pipe carries a seamless
 * boot stream, which must be left untouched), and finally power gates all
 * DSC instances not in use by a running, DSC-enabled OPTC.
 */
void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
	bool can_apply_seamless_boot = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Temporarily wire up default mappings so plane_atomic_disconnect
		 * and disable_plane can operate; cleared again below.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	if (pg_cntl != NULL) {
		if (pg_cntl->funcs->dsc_pg_control != NULL) {
			uint32_t num_opps = 0;
			uint32_t opp_id_src0 = OPP_ID_INVALID;
			uint32_t opp_id_src1 = OPP_ID_INVALID;

			// Step 1: To find out which OPTC is running & OPTC DSC is ON
			// We can't use res_pool->res_cap->num_timing_generator to check
			// Because it records display pipes default setting built in driver,
			// not display pipes of the current chip.
			// Some ASICs may have fewer display pipes than the default setting.
			// In dcnxx_resource_construct function, driver would obtain real information.
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				uint32_t optc_dsc_state = 0;
				struct timing_generator *tg = dc->res_pool->timing_generators[i];

				if (tg->funcs->is_tg_enabled(tg)) {
					if (tg->funcs->get_dsc_status)
						tg->funcs->get_dsc_status(tg, &optc_dsc_state);
					// Only one OPTC with DSC is ON, so if we got one result,
					// we would exit this block. non-zero value is DSC enabled
					if (optc_dsc_state != 0) {
						tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
						break;
					}
				}
			}

			// Step 2: To power down DSC but skip DSC  of running OPTC
			for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
				struct dcn_dsc_state s  = {0};

				dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

				/* skip the DSC feeding the active OPTC found above */
				if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
					s.dsc_clock_en && s.dsc_fw_en)
					continue;

				pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false);
			}
		}
	}
}
888 
dcn35_enable_plane(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context)889 void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
890 			       struct dc_state *context)
891 {
892 	/* enable DCFCLK current DCHUB */
893 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
894 
895 	/* initialize HUBP on power up */
896 	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
897 
898 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
899 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
900 			pipe_ctx->stream_res.opp,
901 			true);
902 	/*to do: insert PG here*/
903 	if (dc->vm_pa_config.valid) {
904 		struct vm_system_aperture_param apt;
905 
906 		apt.sys_default.quad_part = 0;
907 
908 		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
909 		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
910 
911 		// Program system aperture settings
912 		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
913 	}
914 
915 	if (!pipe_ctx->top_pipe
916 		&& pipe_ctx->plane_state
917 		&& pipe_ctx->plane_state->flip_int_enabled
918 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
919 		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
920 }
921 
/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 *
 * Tears down the per-plane pipeline in a fixed order: wait for the MPCC
 * disconnect to finish, drop GSL synchronization, gate HUBP/DPP clocks,
 * reset the DPP, then clear the pipe_ctx bookkeeping. The statement order
 * is part of the HW sequence and must not be rearranged.
 */
void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	/* Block until the MPCC is fully disconnected before gating clocks. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	/* In flip immediate with pipe splitting case GSL is used for
	 * synchronization so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
/*
	if (hubp->funcs->hubp_update_mall_sel)
		hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
*/
	dc->hwss.set_flip_control_gsl(pipe_ctx, false);

	/* Gate the DCHUB clock for this pipe. */
	hubp->funcs->hubp_clk_cntl(hubp, false);

	/* Gate both the DPP clock and its root gating. */
	dpp->funcs->dpp_dppclk_control(dpp, false, false);
/*to do, need to support both case*/
	hubp->power_gated = true;

	dpp->funcs->dpp_reset(dpp);

	/* Clear software state so this pipe reads as free/idle. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
958 
dcn35_disable_plane(struct dc * dc,struct dc_state * state,struct pipe_ctx * pipe_ctx)959 void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
960 {
961 	struct dce_hwseq *hws = dc->hwseq;
962 	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
963 	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
964 
965 	DC_LOGGER_INIT(dc->ctx->logger);
966 
967 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
968 		return;
969 
970 	if (hws->funcs.plane_atomic_disable)
971 		hws->funcs.plane_atomic_disable(dc, pipe_ctx);
972 
973 	/* Turn back off the phantom OTG after the phantom plane is fully disabled
974 	 */
975 	if (is_phantom)
976 		if (tg && tg->funcs->disable_phantom_crtc)
977 			tg->funcs->disable_phantom_crtc(tg);
978 
979 	DC_LOG_DC("Power down front end %d\n",
980 					pipe_ctx->pipe_idx);
981 }
982 
dcn35_calc_blocks_to_gate(struct dc * dc,struct dc_state * context,struct pg_block_update * update_state)983 void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
984 	struct pg_block_update *update_state)
985 {
986 	bool hpo_frl_stream_enc_acquired = false;
987 	bool hpo_dp_stream_enc_acquired = false;
988 	int i = 0, j = 0;
989 	int edp_num = 0;
990 	struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };
991 
992 	memset(update_state, 0, sizeof(struct pg_block_update));
993 
994 	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
995 		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
996 				dc->res_pool->hpo_dp_stream_enc[i]) {
997 			hpo_dp_stream_enc_acquired = true;
998 			break;
999 		}
1000 	}
1001 
1002 	if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
1003 		update_state->pg_res_update[PG_HPO] = true;
1004 
1005 	update_state->pg_res_update[PG_DWB] = true;
1006 
1007 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1008 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1009 
1010 		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
1011 			update_state->pg_pipe_res_update[j][i] = true;
1012 
1013 		if (!pipe_ctx)
1014 			continue;
1015 
1016 		if (pipe_ctx->plane_res.hubp)
1017 			update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;
1018 
1019 		if (pipe_ctx->plane_res.dpp)
1020 			update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
1021 
1022 		if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) &&
1023 			pipe_ctx->plane_res.mpcc_inst >= 0)
1024 			update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
1025 
1026 		if (pipe_ctx->stream_res.dsc)
1027 			update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
1028 
1029 		if (pipe_ctx->stream_res.opp)
1030 			update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
1031 	}
1032 	/*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/
1033 	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1034 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1035 		if (tg && tg->funcs->is_tg_enabled(tg)) {
1036 			update_state->pg_pipe_res_update[PG_OPTC][i] = false;
1037 			break;
1038 		}
1039 	}
1040 
1041 	dc_get_edp_links(dc, edp_links, &edp_num);
1042 	if (edp_num == 0 ||
1043 		((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
1044 			(!edp_links[1] || !edp_links[1]->edp_sink_present))) {
1045 		/*eDP not exist on this config, keep Domain24 power on, for S0i3, this will be handled in dmubfw*/
1046 		update_state->pg_pipe_res_update[PG_OPTC][0] = false;
1047 	}
1048 
1049 }
1050 
/*
 * Work out which HW blocks must be ungated (powered up) before applying
 * @context, by diffing it against dc->current_state.
 *
 * Two cases mark a resource for ungating:
 *  - a pipe gains a plane or stream it did not have before (new addition),
 *  - an otherwise-unchanged pipe was remapped onto a different physical
 *    block (hubp/dpp/opp/dsc/tg instance changed).
 * HPO is ungated when any HPO DP stream encoder is acquired in @context.
 */
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
	struct pg_block_update *update_state)
{
	/* NOTE(review): hpo_frl_stream_enc_acquired is never set in this
	 * function — FRL encoder accounting appears unimplemented; confirm.
	 */
	bool hpo_frl_stream_enc_acquired = false;
	bool hpo_dp_stream_enc_acquired = false;
	int i = 0, j = 0;

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *cur_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];

		if (cur_pipe == NULL || new_pipe == NULL)
			continue;

		if ((!cur_pipe->plane_state && new_pipe->plane_state) ||
			(!cur_pipe->stream && new_pipe->stream)) {
			// New pipe addition: ungate every block the new pipe uses.
			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
				if (j == PG_HUBP && new_pipe->plane_res.hubp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;

				if (j == PG_DPP && new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;

				if (j == PG_MPCC && new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.mpcc_inst] = true;

				if (j == PG_DSC && new_pipe->stream_res.dsc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;

				if (j == PG_OPP && new_pipe->stream_res.opp)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;

				if (j == PG_OPTC && new_pipe->stream_res.tg)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
			}
		} else if (cur_pipe->plane_state == new_pipe->plane_state ||
				cur_pipe == new_pipe) {
			// Unchanged pipes: ungate only blocks whose instance changed.
			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
				if (j == PG_HUBP &&
					cur_pipe->plane_res.hubp != new_pipe->plane_res.hubp &&
					new_pipe->plane_res.hubp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;

				if (j == PG_DPP &&
					cur_pipe->plane_res.dpp != new_pipe->plane_res.dpp &&
					new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;

				if (j == PG_OPP &&
					cur_pipe->stream_res.opp != new_pipe->stream_res.opp &&
					new_pipe->stream_res.opp)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;

				if (j == PG_DSC &&
					cur_pipe->stream_res.dsc != new_pipe->stream_res.dsc &&
					new_pipe->stream_res.dsc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;

				if (j == PG_OPTC &&
					cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
					new_pipe->stream_res.tg)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
			}
		}
	}

	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
				dc->res_pool->hpo_dp_stream_enc[i]) {
			hpo_dp_stream_enc_acquired = true;
			break;
		}
	}

	if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
		update_state->pg_res_update[PG_HPO] = true;

}
1133 
1134 /**
1135  * dcn35_hw_block_power_down() - power down sequence
1136  *
1137  * The following sequence describes the ON-OFF (ONO) for power down:
1138  *
1139  *	ONO Region 3, DCPG 25: hpo - SKIPPED
1140  *	ONO Region 4, DCPG 0: dchubp0, dpp0
1141  *	ONO Region 6, DCPG 1: dchubp1, dpp1
1142  *	ONO Region 8, DCPG 2: dchubp2, dpp2
1143  *	ONO Region 10, DCPG 3: dchubp3, dpp3
1144  *	ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
1145  *	ONO Region 5, DCPG 16: dsc0
1146  *	ONO Region 7, DCPG 17: dsc1
1147  *	ONO Region 9, DCPG 18: dsc2
1148  *	ONO Region 11, DCPG 19: dsc3
1149  *	ONO Region 2, DCPG 24: mpc opp optc dwb
1150  *	ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
1151  *
1152  * @dc: Current DC state
1153  * @update_state: update PG sequence states for HW block
1154  */
dcn35_hw_block_power_down(struct dc * dc,struct pg_block_update * update_state)1155 void dcn35_hw_block_power_down(struct dc *dc,
1156 	struct pg_block_update *update_state)
1157 {
1158 	int i = 0;
1159 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
1160 
1161 	if (!pg_cntl)
1162 		return;
1163 	if (dc->debug.ignore_pg)
1164 		return;
1165 
1166 	if (update_state->pg_res_update[PG_HPO]) {
1167 		if (pg_cntl->funcs->hpo_pg_control)
1168 			pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
1169 	}
1170 
1171 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1172 		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1173 			update_state->pg_pipe_res_update[PG_DPP][i]) {
1174 			if (pg_cntl->funcs->hubp_dpp_pg_control)
1175 				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
1176 		}
1177 	}
1178 	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
1179 		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1180 			if (pg_cntl->funcs->dsc_pg_control)
1181 				pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
1182 		}
1183 
1184 
1185 	/*this will need all the clients to unregister optc interruts let dmubfw handle this*/
1186 	if (pg_cntl->funcs->plane_otg_pg_control)
1187 		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
1188 
1189 	//domain22, 23, 25 currently always on.
1190 
1191 }
1192 
1193 /**
1194  * dcn35_hw_block_power_up() - power up sequence
1195  *
1196  * The following sequence describes the ON-OFF (ONO) for power up:
1197  *
1198  *	ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
1199  *	ONO Region 2, DCPG 24: mpc opp optc dwb
1200  *	ONO Region 5, DCPG 16: dsc0
1201  *	ONO Region 7, DCPG 17: dsc1
1202  *	ONO Region 9, DCPG 18: dsc2
1203  *	ONO Region 11, DCPG 19: dsc3
1204  *	ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
1205  *	ONO Region 4, DCPG 0: dchubp0, dpp0
1206  *	ONO Region 6, DCPG 1: dchubp1, dpp1
1207  *	ONO Region 8, DCPG 2: dchubp2, dpp2
1208  *	ONO Region 10, DCPG 3: dchubp3, dpp3
1209  *	ONO Region 3, DCPG 25: hpo - SKIPPED
1210  *
1211  * @dc: Current DC state
1212  * @update_state: update PG sequence states for HW block
1213  */
dcn35_hw_block_power_up(struct dc * dc,struct pg_block_update * update_state)1214 void dcn35_hw_block_power_up(struct dc *dc,
1215 	struct pg_block_update *update_state)
1216 {
1217 	int i = 0;
1218 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
1219 
1220 	if (!pg_cntl)
1221 		return;
1222 	if (dc->debug.ignore_pg)
1223 		return;
1224 	//domain22, 23, 25 currently always on.
1225 	/*this will need all the clients to unregister optc interruts let dmubfw handle this*/
1226 	if (pg_cntl->funcs->plane_otg_pg_control)
1227 		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
1228 
1229 	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
1230 		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1231 			if (pg_cntl->funcs->dsc_pg_control)
1232 				pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
1233 		}
1234 
1235 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1236 		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1237 			update_state->pg_pipe_res_update[PG_DPP][i]) {
1238 			if (pg_cntl->funcs->hubp_dpp_pg_control)
1239 				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
1240 		}
1241 	}
1242 	if (update_state->pg_res_update[PG_HPO]) {
1243 		if (pg_cntl->funcs->hpo_pg_control)
1244 			pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
1245 	}
1246 }
/*
 * Gate/ungate root clocks for DPP and DSC. When powering up, root clocks
 * are enabled before the blocks; when powering down, they are disabled
 * after — hence the two symmetric pipe loops around the DSC loop.
 */
void dcn35_root_clock_control(struct dc *dc,
	struct pg_block_update *update_state, bool power_on)
{
	int i;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
	struct dccg *dccg = dc->res_pool->dccg;

	if (!pg_cntl)
		return;

	/* Enable root clock first when powering up. */
	if (power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i] &&
			    dc->hwseq->funcs.dpp_root_clock_control)
				dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, true);
		}
	}

	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
		if (!update_state->pg_pipe_res_update[PG_DSC][i])
			continue;

		if (power_on) {
			if (dccg->funcs->enable_dsc)
				dccg->funcs->enable_dsc(dccg, i);
		} else {
			if (dccg->funcs->disable_dsc)
				dccg->funcs->disable_dsc(dccg, i);
		}
	}

	/* Disable root clock last when powering down. */
	if (!power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i] &&
			    dc->hwseq->funcs.dpp_root_clock_control)
				dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, false);
		}
	}
}
1285 
dcn35_prepare_bandwidth(struct dc * dc,struct dc_state * context)1286 void dcn35_prepare_bandwidth(
1287 		struct dc *dc,
1288 		struct dc_state *context)
1289 {
1290 	struct pg_block_update pg_update_state;
1291 
1292 	if (dc->hwss.calc_blocks_to_ungate) {
1293 		dc->hwss.calc_blocks_to_ungate(dc, context, &pg_update_state);
1294 
1295 		if (dc->hwss.root_clock_control)
1296 			dc->hwss.root_clock_control(dc, &pg_update_state, true);
1297 		/*power up required HW block*/
1298 		if (dc->hwss.hw_block_power_up)
1299 			dc->hwss.hw_block_power_up(dc, &pg_update_state);
1300 	}
1301 
1302 	dcn20_prepare_bandwidth(dc, context);
1303 }
1304 
dcn35_optimize_bandwidth(struct dc * dc,struct dc_state * context)1305 void dcn35_optimize_bandwidth(
1306 		struct dc *dc,
1307 		struct dc_state *context)
1308 {
1309 	struct pg_block_update pg_update_state;
1310 
1311 	dcn20_optimize_bandwidth(dc, context);
1312 
1313 	if (dc->hwss.calc_blocks_to_gate) {
1314 		dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
1315 		/*try to power down unused block*/
1316 		if (dc->hwss.hw_block_power_down)
1317 			dc->hwss.hw_block_power_down(dc, &pg_update_state);
1318 
1319 		if (dc->hwss.root_clock_control)
1320 			dc->hwss.root_clock_control(dc, &pg_update_state, false);
1321 	}
1322 }
1323 
/* Forward the idle-allow state to the clock manager. */
void dcn35_set_idle_state(const struct dc *dc, bool allow_idle)
{
	struct clk_mgr *clk_mgr = dc->clk_mgr;

	// TODO: Find a more suitable communication channel.
	if (clk_mgr->funcs->set_idle_state)
		clk_mgr->funcs->set_idle_state(clk_mgr, allow_idle);
}
1330 
dcn35_get_idle_state(const struct dc * dc)1331 uint32_t dcn35_get_idle_state(const struct dc *dc)
1332 {
1333 	// TODO: Find a more suitable communcation
1334 	if (dc->clk_mgr->funcs->get_idle_state)
1335 		return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr);
1336 
1337 	return 0;
1338 }
1339 
/*
 * Program DRR (dynamic refresh rate) parameters on every pipe's OTG and,
 * when DRR is active (v_total_min/max non-zero), arm the static-screen
 * trigger with an IPS-aware frame count.
 */
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
	unsigned int event_triggers = 0x800;
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;

	for (i = 0; i < num_pipes; i++) {
		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
			struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
			struct dc *dc = pipe_ctx[i]->stream->ctx->dc;

			if (dc->debug.static_screen_wait_frames) {
				// NOTE(review): pix_clk_100hz is in units of 100 Hz,
				// so this quotient is fps/100, not fps; confirm the
				// ">= 120" threshold and the "% 60" scaling below are
				// intended for that unit.
				unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);

				if (frame_rate >= 120 && dc->caps.ips_support &&
					dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
					/*ips enable case*/
					num_frames = 2 * (frame_rate % 60);
				}
			}
			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
					pipe_ctx[i]->stream_res.tg, &params);
			// Only arm static-screen control when DRR is actually enabled.
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
						pipe_ctx[i]->stream_res.tg,
						event_triggers, num_frames);
		}
	}
}
1380