/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dccg.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "reg_helper.h"
#include "abm.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dc_dmub_srv.h"
#include "dcn35_hwseq.h"
#include "dcn35/dcn35_dccg.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
#include "link.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
#include "dce/dce_i2c_hw.h"
#include "dsc.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"

#define DC_LOGGER_INIT(logger) \
	struct dal_logger *dc_logger = logger

#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg
#define DC_LOGGER \
	dc_logger

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

#if 0
static void enable_memory_low_power(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;

	if (dc->debug.enable_mem_low_power.bits.dmcu) {
		// Force ERAM to shutdown if DMCU is not enabled
		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
		}
	}
	/* dcn35 has MEM_PWR enabled by default, make sure to wake the memories up */
	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.mpc &&
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);

	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
		// Power down VPGs
		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_DP2_0)
		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
	}

}
#endif

void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE_3(DMU_CLK_CNTL,
		RBBMIF_FGCG_REP_DIS, !enable,
		IHC_FGCG_REP_DIS, !enable,
		LONO_FGCG_REP_DIS, !enable
	);
}

void dcn35_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}

void dcn35_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int i;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	//dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu);

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		/* this calls into dmub fw to do the init */
		hws->funcs.bios_golden_init(dc);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	//enable_memory_low_power(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

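	/* Note: the reference clocks above cascade from the BIOS crystal.
	 * xtalin feeds DCCG, and the DCCG output (read back through
	 * get_dccg_ref_freq) feeds DCHUB. For example (value illustrative),
	 * with a 100 MHz crystal reported by fw_info, the fallback path
	 * without a DCCG sw component simply inherits 100000 kHz for both
	 * derived ref clocks.
	 */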
	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);
/*
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
*/
	if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
		res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {

		// we want to turn off edp displays if odm is enabled and no seamless boot
		if (!dc->caps.seamless_odm) {
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				struct timing_generator *tg = dc->res_pool->timing_generators[i];
				uint32_t num_opps, opp_id_src0, opp_id_src1;

				num_opps = 1;
				if (tg) {
					if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
						tg->funcs->get_optc_source(tg, &num_opps,
								&opp_id_src0, &opp_id_src1);
					}
				}

				if (num_opps > 1) {
					dc->link_srv->blank_all_edp_displays(dc);
					break;
				}
			}
		}

		hws->funcs.init_pipes(dc, dc->current_state);
		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
			!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
	}
	if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
		for (i = 0; i < res_pool->pipe_count; i++)
			res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}
	if (dc->ctx->dmub_srv) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (abms[i] != NULL && abms[i]->funcs != NULL)
				abms[i]->funcs->abm_init(abms[i], backlight, user_level);
		}
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	// Set i2c to light sleep until engine is setup
	if (dc->debug.enable_mem_low_power.bits.i2c)
		REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0);

	if (hws->funcs.setup_hpo_hw_control)
		hws->funcs.setup_hpo_hw_control(hws, false);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->debug.disable_mem_low_power) {
		REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
	}
	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub,
				dc->ctx->dc_bios->vram_info.num_chans,
				dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support;
	}

	if (dc->res_pool->pg_cntl) {
		if (dc->res_pool->pg_cntl->funcs->init_pg_status)
			dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
	}
}

static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;

	DC_LOGGER_INIT(stream->ctx->logger);

	ASSERT(dsc);
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	if (enable) {
		struct dsc_config dsc_cfg;
		struct dsc_optc_config dsc_optc_cfg = {0};
		enum optc_dsc_mode optc_dsc_mode;
		struct dcn_dsc_state dsc_state = {0};

		if (!dsc) {
			DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
			return;
		}

		if (dsc->funcs->dsc_read_state) {
			dsc->funcs->dsc_read_state(dsc, &dsc_state);
			if (!dsc_state.dsc_fw_en) {
				DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
				return;
			}
		}
		/* Enable DSC hw block */
		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
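		/* Illustration (values hypothetical): with 2:1 ODM combine each
		 * DSC instance is programmed with an equal horizontal slice of
		 * the picture, so a 7680-wide timing with num_slices_h = 8
		 * gives a per-instance pic_width of 3840 and 4 slices; the
		 * totals are restored below once every instance is configured.
		 */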
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

			ASSERT(odm_dsc);
			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
		}
		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
		dsc_cfg.pic_width *= opp_cnt;

		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;

		/* Enable DSC in OPTC */
		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
				optc_dsc_mode,
				dsc_optc_cfg.bytes_per_pixel,
				dsc_optc_cfg.slice_width);
	} else {
		/* disable DSC in OPTC */
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
				pipe_ctx->stream_res.tg,
				OPTC_DSC_DISABLED, 0, 0);

		/* disable DSC block */
		dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			ASSERT(odm_pipe->stream_res.dsc);
			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
		}
	}
}

// Given any pipe_ctx, return the total ODM combine factor, and optionally return
// the OPP ids which are used
static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances)
{
	unsigned int opp_count = 1;
	struct pipe_ctx *odm_pipe;

	// First get to the top pipe
	for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe)
		;

	// First pipe is always used
	if (opp_instances)
		opp_instances[0] = odm_pipe->stream_res.opp->inst;

	// Find and count odm pipes, if any
	for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		if (opp_instances)
			opp_instances[opp_count] = odm_pipe->stream_res.opp->inst;
		opp_count++;
	}

	return opp_count;
}

void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 0;
	int opp_inst[MAX_PIPES] = {0};
	int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
	struct mpc *mpc = dc->res_pool->mpc;
	int i;

	opp_cnt = get_odm_config(pipe_ctx, opp_inst);

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

	if (mpc->funcs->set_out_rate_control) {
		for (i = 0; i < opp_cnt; ++i) {
			mpc->funcs->set_out_rate_control(
					mpc, opp_inst[i],
					false,
					0,
					NULL);
		}
	}

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
				odm_pipe->stream_res.opp,
				true);
	}

	if (pipe_ctx->stream_res.dsc) {
		struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];

		update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);

		/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
		if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
				current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
			struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;

			/* disconnect DSC block from stream */
			dsc->funcs->dsc_disconnect(dsc);
		}
	}
}

void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control) {
		hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
			hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
	}
}

void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) {
		hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating(
			hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on);
	}
}

void dcn35_physymclk_root_clock_control(struct dce_hwseq *hws, unsigned int phy_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.physymclk)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating) {
		hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating(
			hws->ctx->dc->res_pool->dccg, phy_inst, clock_on);
	}
}

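/* Each DSC instance lives in its own power domain (DOMAIN16..DOMAIN19 map to
 * DSC0..DSC3). dcn35_dsc_pg_control() below raises DC_IP_REQUEST_CNTL, flips
 * DOMAIN_POWER_GATE, and then polls DOMAIN_PGFSM_PWR_STATUS until the PGFSM
 * reports the requested state: 0 when powered on, 2 when power gated.
 */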
void dcn35_dsc_pg_control(
		struct dce_hwseq *hws,
		unsigned int dsc_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_dsc_power_gate)
		return;
	if (hws->ctx->dc->debug.ignore_pg)
		return;
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}

void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
{
	bool force_on = true; /* disable power gating */
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (hws->ctx->dc->debug.ignore_pg)
		return;
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
	/* DCHUBP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	/* DPP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);

	force_on = true; /* disable power gating */
	if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
		force_on = false;

	/* DCS0/1/2/3/4 */
	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
}

/* In headless boot cases, DIG may be turned on,
 * which causes HW/SW discrepancies.
 * To avoid this, power down the hardware on boot
 * if DIG is turned on.
 */
void dcn35_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwseq->funcs.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwseq->funcs.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwseq->funcs.power_down) {
				dc->hwseq->funcs.power_down(dc);
				break;
			}
		}
	}

	/*
	 * Call update_clocks with empty context to send DISPLAY_OFF;
	 * otherwise DISPLAY_OFF may not be asserted.
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);

	if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER)
		dc_allow_idle_optimizations(dc, true);
}

bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	if (dc->debug.dmcub_emulation)
		return true;

	if (enable) {
		uint32_t num_active_edp = 0;
		int i;

		for (i = 0; i < dc->current_state->stream_count; ++i) {
			struct dc_stream_state *stream = dc->current_state->streams[i];
			struct dc_link *link = stream->link;
			bool is_psr = link && !link->panel_config.psr.disable_psr &&
					(link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
					link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
			bool is_replay = link && link->replay_settings.replay_feature_enabled;

			/* Ignore streams that are disabled. */
			if (stream->dpms_off)
				continue;

			/* Active external displays block idle optimizations. */
			if (!dc_is_embedded_signal(stream->signal))
				return false;

			/* If not PWRSEQ0 can't enter idle optimizations */
			if (link && link->link_index != 0)
				return false;

			/* Check for panel power features required for idle optimizations. */
			if (!is_psr && !is_replay)
				return false;

			num_active_edp += 1;
		}

		/* If more than one active eDP then disallow. */
		if (num_active_edp > 1)
			return false;
	}

	// TODO: review other cases when idle optimization is allowed
	dc_dmub_srv_apply_idle_power_optimizations(dc, enable);

	return true;
}

void dcn35_z10_restore(const struct dc *dc)
{
	if (dc->debug.disable_z10)
		return;

	dc_dmub_srv_apply_idle_power_optimizations(dc, false);

	dcn31_z10_restore(dc);
}

void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
	bool can_apply_seamless_boot = false;
	bool tg_enabled[MAX_PIPES] = {false};

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
			if (hubbub->funcs->program_det_segments)
				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			tg_enabled[i] = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Clean up MPC tree */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (tg_enabled[i]) {
			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
				}
			}
		}
	}

	if (pg_cntl != NULL) {
		if (pg_cntl->funcs->dsc_pg_control != NULL) {
			uint32_t num_opps = 0;
			uint32_t opp_id_src0 = OPP_ID_INVALID;
			uint32_t opp_id_src1 = OPP_ID_INVALID;
			uint32_t optc_dsc_state = 0;

			// Step 1: Find out which OPTC is running and has DSC enabled.
			// We can't use res_pool->res_cap->num_timing_generator to check,
			// because it records the default number of display pipes built
			// into the driver, not the number on the current chip; some
			// ASICs are fused to fewer display pipes than the default.
			// The driver obtains the real count in the
			// dcnxx_resource_construct function.
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				struct timing_generator *tg = dc->res_pool->timing_generators[i];

				if (tg->funcs->is_tg_enabled(tg)) {
					if (tg->funcs->get_dsc_status)
						tg->funcs->get_dsc_status(tg, &optc_dsc_state);
					// Only one OPTC with DSC can be on, so exit at the
					// first hit; a non-zero value means DSC is enabled.
					if (optc_dsc_state != 0) {
						tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
						break;
					}
				}
			}

			// Step 2: Power down each DSC, but skip the DSC of the running OPTC.
			for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
				struct dcn_dsc_state s = {0};

				/* avoid reading DSC state when it is not in use as it may be power gated */
				if (optc_dsc_state) {
					dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

					if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
							s.dsc_clock_en && s.dsc_fw_en)
						continue;
				}

				pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false);
			}
		}
	}
}

void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
			struct dc_state *context)
{
	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* initialize HUBP on power up */
	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);
	/* to do: insert PG here */
	if (dc->vm_pa_config.valid) {
		struct vm_system_aperture_param apt;

		apt.sys_default.quad_part = 0;

		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;

		// Program system aperture settings
		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
	}

	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
}

/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	/* In flip immediate with pipe splitting case GSL is used for
	 * synchronization so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
/*
	if (hubp->funcs->hubp_update_mall_sel)
		hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
*/
	dc->hwss.set_flip_control_gsl(pipe_ctx, false);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);
	/* to do: need to support both cases */
	hubp->power_gated = true;

	hubp->funcs->hubp_reset(hubp);
	dpp->funcs->dpp_reset(dpp);

	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}

void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	if (hws->funcs.plane_atomic_disable)
		hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	/* Turn back off the phantom OTG after the phantom plane is fully disabled
	 */
	if (is_phantom)
		if (tg && tg->funcs->disable_phantom_crtc)
			tg->funcs->disable_phantom_crtc(tg);

	DC_LOG_DC("Power down front end %d\n",
			pipe_ctx->pipe_idx);
}

void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
	struct pg_block_update *update_state)
{
	bool hpo_frl_stream_enc_acquired = false;
	bool hpo_dp_stream_enc_acquired = false;
	int i = 0, j = 0;
	int edp_num = 0;
	struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
				dc->res_pool->hpo_dp_stream_enc[i]) {
			hpo_dp_stream_enc_acquired = true;
			break;
		}
	}

	if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
		update_state->pg_res_update[PG_HPO] = true;

	update_state->pg_res_update[PG_DWB] = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
			update_state->pg_pipe_res_update[j][i] = true;

		if (!pipe_ctx)
			continue;

		if (pipe_ctx->plane_res.hubp)
			update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;

		if (pipe_ctx->plane_res.dpp && pipe_ctx->plane_res.hubp)
			update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;

		if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
			update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;

		if (pipe_ctx->stream_res.dsc) {
			update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
			if (dc->caps.sequential_ono) {
				update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
				update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
			}
		}

		if (pipe_ctx->stream_res.opp)
			update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;

		if (pipe_ctx->stream_res.hpo_dp_stream_enc)
			update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
	}

	for (i = 0; i < dc->link_count; i++) {
		update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;
		if (dc->links[i]->type != dc_connection_none)
			update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = false;
	}

	/* Domain 24 controls all OTG, MPC and OPP instances; as long as one OTG
	 * is still up, avoid enabling OTG PG.
	 */
	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		if (tg && tg->funcs->is_tg_enabled(tg)) {
			update_state->pg_pipe_res_update[PG_OPTC][i] = false;
			break;
		}
	}

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num == 0 ||
		((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
		(!edp_links[1] || !edp_links[1]->edp_sink_present))) {
		/* No eDP exists in this config; keep Domain24 powered on.
		 * For S0i3 this is handled in dmub fw.
		 */
		update_state->pg_pipe_res_update[PG_OPTC][0] = false;
	}

	if (dc->caps.sequential_ono) {
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
					!update_state->pg_pipe_res_update[PG_DPP][i]) {
				for (j = i - 1; j >= 0; j--) {
					update_state->pg_pipe_res_update[PG_HUBP][j] = false;
					update_state->pg_pipe_res_update[PG_DPP][j] = false;
				}

				break;
			}
		}
	}
}

void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
	struct pg_block_update *update_state)
{
	bool hpo_frl_stream_enc_acquired = false;
	bool hpo_dp_stream_enc_acquired = false;
	int i = 0, j = 0;

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *cur_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];

		if (cur_pipe == NULL || new_pipe == NULL)
			continue;

		if ((!cur_pipe->plane_state && new_pipe->plane_state) ||
			(!cur_pipe->stream && new_pipe->stream) ||
			(cur_pipe->stream != new_pipe->stream && new_pipe->stream)) {
			// New pipe addition
			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
				if (j == PG_HUBP && new_pipe->plane_res.hubp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;

				if (j == PG_DPP && new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;

				if (j == PG_MPCC && new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.mpcc_inst] = true;

				if (j == PG_DSC && new_pipe->stream_res.dsc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;

				if (j == PG_OPP && new_pipe->stream_res.opp)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;

				if (j == PG_OPTC && new_pipe->stream_res.tg)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;

				if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
			}
		} else if (cur_pipe->plane_state == new_pipe->plane_state ||
				cur_pipe == new_pipe) {
			// unchanged pipes
			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
				if (j == PG_HUBP &&
					cur_pipe->plane_res.hubp != new_pipe->plane_res.hubp &&
					new_pipe->plane_res.hubp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;

				if (j == PG_DPP &&
					cur_pipe->plane_res.dpp != new_pipe->plane_res.dpp &&
					new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;

				if (j == PG_OPP &&
					cur_pipe->stream_res.opp != new_pipe->stream_res.opp &&
					new_pipe->stream_res.opp)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;

				if (j == PG_DSC &&
					cur_pipe->stream_res.dsc != new_pipe->stream_res.dsc &&
					new_pipe->stream_res.dsc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;

				if (j == PG_OPTC &&
					cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
					new_pipe->stream_res.tg)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;

				if (j == PG_DPSTREAM &&
					cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc &&
					new_pipe->stream_res.hpo_dp_stream_enc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
			}
		}
	}

	for (i = 0; i < dc->link_count; i++)
		if (dc->links[i]->type != dc_connection_none)
			update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;

	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
				dc->res_pool->hpo_dp_stream_enc[i]) {
			hpo_dp_stream_enc_acquired = true;
			break;
		}
	}

	if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
		update_state->pg_res_update[PG_HPO] = true;

	if (hpo_frl_stream_enc_acquired)
		update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;

	if (dc->caps.sequential_ono) {
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
					update_state->pg_pipe_res_update[PG_DPP][i]) {
				for (j = i - 1; j >= 0; j--) {
					update_state->pg_pipe_res_update[PG_HUBP][j] = true;
					update_state->pg_pipe_res_update[PG_DPP][j] = true;
				}

				break;
			}
		}
	}
}

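/* The calc_blocks_to_gate()/calc_blocks_to_ungate() helpers above fill the
 * pg_block_update bitmap consumed by the sequences below: the gate variant
 * marks every block and then clears whatever the new context still uses,
 * while the ungate variant marks only blocks newly acquired relative to the
 * current state. dcn35_hw_block_power_down()/_up() then act on that bitmap.
 */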
/**
 * dcn35_hw_block_power_down() - power down sequence
 *
 * The following sequence describes the ON-OFF (ONO) for power down:
 *
 * ONO Region 3, DCPG 25: hpo - SKIPPED
 * ONO Region 4, DCPG 0: dchubp0, dpp0
 * ONO Region 6, DCPG 1: dchubp1, dpp1
 * ONO Region 8, DCPG 2: dchubp2, dpp2
 * ONO Region 10, DCPG 3: dchubp3, dpp3
 * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power down at IPS2 entry
 * ONO Region 5, DCPG 16: dsc0
 * ONO Region 7, DCPG 17: dsc1
 * ONO Region 9, DCPG 18: dsc2
 * ONO Region 11, DCPG 19: dsc3
 * ONO Region 2, DCPG 24: mpc opp optc dwb
 * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. Will be powered down after the lono timer is armed
 *
 * If sequential ONO is specified, the order is modified from ONO Region 11 -> ONO Region 0 descending.
 *
 * @dc: Current DC state
 * @update_state: update PG sequence states for HW block
 */
void dcn35_hw_block_power_down(struct dc *dc,
	struct pg_block_update *update_state)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	if (dc->debug.ignore_pg)
		return;

	if (update_state->pg_res_update[PG_HPO]) {
		if (pg_cntl->funcs->hpo_pg_control)
			pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
	}

	if (!dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
					update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (pg_cntl->funcs->hubp_dpp_pg_control)
					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
			}
		}

		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
			}
		}
	} else {
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
			}

			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
					update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (pg_cntl->funcs->hubp_dpp_pg_control)
					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
			}
		}
	}

	/* This needs all clients to unregister the OPTC interrupts; let dmub fw handle it. */
	if (pg_cntl->funcs->plane_otg_pg_control)
		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);

	// domain22, 23, 25 currently always on.
}

/**
 * dcn35_hw_block_power_up() - power up sequence
 *
 * The following sequence describes the ON-OFF (ONO) for power up:
 *
 * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
 * ONO Region 2, DCPG 24: mpc opp optc dwb
 * ONO Region 5, DCPG 16: dsc0
 * ONO Region 7, DCPG 17: dsc1
 * ONO Region 9, DCPG 18: dsc2
 * ONO Region 11, DCPG 19: dsc3
 * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
 * ONO Region 4, DCPG 0: dchubp0, dpp0
 * ONO Region 6, DCPG 1: dchubp1, dpp1
 * ONO Region 8, DCPG 2: dchubp2, dpp2
 * ONO Region 10, DCPG 3: dchubp3, dpp3
 * ONO Region 3, DCPG 25: hpo - SKIPPED
 *
 * If sequential ONO is specified, the order is modified from ONO Region 0 -> ONO Region 11 ascending.
 *
 * @dc: Current DC state
 * @update_state: update PG sequence states for HW block
 */
void dcn35_hw_block_power_up(struct dc *dc,
	struct pg_block_update *update_state)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	if (dc->debug.ignore_pg)
		return;
	// domain22, 23, 25 currently always on.
	/* This needs all clients to unregister the OPTC interrupts; let dmub fw handle it. */
	if (pg_cntl->funcs->plane_otg_pg_control)
		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);

	if (!dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
			}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
				update_state->pg_pipe_res_update[PG_DPP][i]) {
			if (pg_cntl->funcs->hubp_dpp_pg_control)
				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
		}

		if (dc->caps.sequential_ono) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
			}
		}
	}
	if (update_state->pg_res_update[PG_HPO]) {
		if (pg_cntl->funcs->hpo_pg_control)
			pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
	}
}

void dcn35_root_clock_control(struct dc *dc,
	struct pg_block_update *update_state, bool power_on)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	/* enable root clocks first when powering up */
	if (power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
					update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (dc->hwseq->funcs.dpp_root_clock_control)
					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
			}
			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
				if (dc->hwseq->funcs.dpstream_root_clock_control)
					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
		}

		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
				if (dc->hwseq->funcs.physymclk_root_clock_control)
					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);
	}
	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
			if (power_on) {
				if (dc->res_pool->dccg->funcs->enable_dsc)
					dc->res_pool->dccg->funcs->enable_dsc(dc->res_pool->dccg, i);
			} else {
				if (dc->res_pool->dccg->funcs->disable_dsc)
					dc->res_pool->dccg->funcs->disable_dsc(dc->res_pool->dccg, i);
			}
		}
	}
	/* disable root clocks last when powering down */
	if (!power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
					update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (dc->hwseq->funcs.dpp_root_clock_control)
					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
			}
			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
				if (dc->hwseq->funcs.dpstream_root_clock_control)
					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
		}

		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
				if (dc->hwseq->funcs.physymclk_root_clock_control)
					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);
	}
}

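/* Sketch of how the hooks above pair up in the bandwidth paths below:
 * root clocks are ungated before blocks power up, and gated only after
 * blocks power down.
 *
 *   prepare:  calc_blocks_to_ungate() -> root_clock_control(on)  -> hw_block_power_up()
 *   optimize: calc_blocks_to_gate()   -> hw_block_power_down()   -> root_clock_control(off)
 */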
void dcn35_prepare_bandwidth(
	struct dc *dc,
	struct dc_state *context)
{
	struct pg_block_update pg_update_state;

	if (dc->hwss.calc_blocks_to_ungate) {
		dc->hwss.calc_blocks_to_ungate(dc, context, &pg_update_state);

		if (dc->hwss.root_clock_control)
			dc->hwss.root_clock_control(dc, &pg_update_state, true);
		/* power up required HW blocks */
		if (dc->hwss.hw_block_power_up)
			dc->hwss.hw_block_power_up(dc, &pg_update_state);
	}

	dcn20_prepare_bandwidth(dc, context);
}

void dcn35_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct pg_block_update pg_update_state;

	dcn20_optimize_bandwidth(dc, context);

	if (dc->hwss.calc_blocks_to_gate) {
		dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
		/* try to power down unused blocks */
		if (dc->hwss.hw_block_power_down)
			dc->hwss.hw_block_power_down(dc, &pg_update_state);

		if (dc->hwss.root_clock_control)
			dc->hwss.root_clock_control(dc, &pg_update_state, false);
	}
}

void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A
	unsigned int event_triggers = 0x2; // Bit[1]: OTG_TRIG_A
	// Note: DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;

	for (i = 0; i < num_pipes; i++) {
		/* dc_state_destruct() might null the stream resources, so fetch tg
		 * here first to avoid a race condition. The lifetime of the pointee
		 * itself (the timing_generator object) is not a problem here.
		 */
		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;

		if ((tg != NULL) && tg->funcs) {
			if (pipe_ctx[i]->stream && pipe_ctx[i]->stream->ctx->dc->debug.static_screen_wait_frames) {
				struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
				struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
				unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);

				if (frame_rate >= 120 && dc->caps.ips_support &&
					dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
					/* IPS-enabled case */
					num_frames = 2 * (frame_rate % 60);
				}
			}
			if (tg->funcs->set_drr)
				tg->funcs->set_drr(tg, &params);
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (tg->funcs->set_static_screen_control)
					tg->funcs->set_static_screen_control(
						tg, event_triggers, num_frames);
		}
	}
}

void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
		int num_pipes, const struct dc_static_screen_params *params)
{
	unsigned int i;
	unsigned int triggers = 0;

	if (params->triggers.surface_update)
		triggers |= 0x200; /* bit 9: 10 0000 0000 */
	if (params->triggers.cursor_update)
		triggers |= 0x8; /* bit 3 */
	if (params->triggers.force_trigger)
		triggers |= 0x1;

	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->
			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
					triggers, params->num_frames);
}

void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
		int num_pipes, uint32_t v_total_min, uint32_t v_total_max)
{
	int i = 0;
	struct long_vtotal_params params = {0};

	params.vertical_total_max = v_total_max;
	params.vertical_total_min = v_total_min;

	for (i = 0; i < num_pipes; i++) {
		if (!pipe_ctx[i])
			continue;

		if (pipe_ctx[i]->stream) {
			struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;

			if (timing)
				params.vertical_blank_start = timing->v_total - timing->v_front_porch;
			else
				params.vertical_blank_start = 0;

			if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs &&
					pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal)
				pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params);
		}
	}
}

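/* Worked example for the TU occupancy check below (values hypothetical):
 * a 300 MHz pixel clock tunneled over HBR2 (symclk 540 MHz) on 4 lanes gives
 * avg_pix_per_tu_x1000 = (1000 * 300 * 64) / (540 * 4) = 8888, i.e. about
 * 8.9 pixels per TU, well above the ~2.02 cutoff, while a 25 MHz mode on the
 * same link computes ~493 (0.49 pixels per TU) and makes the check trip.
 */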
static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
{
	/* Calculate average pixel count per TU, return false if under ~2.00 to
	 * avoid empty TUs. This is only required for DPIA tunneling as empty TUs
	 * are legal to generate for native DP links. Assume TU size 64 as there
	 * is currently no scenario where it's reprogrammed from HW default.
	 * MTPs have no such limitation, so this does not affect MST use cases.
	 */
	unsigned int pix_clk_mhz;
	unsigned int symclk_mhz;
	unsigned int avg_pix_per_tu_x1000;
	unsigned int tu_size_bytes = 64;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
	const struct dc *dc = pipe_ctx->stream->link->dc;

	if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
		return false;

	// Not necessary for MST configurations
	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return false;

	pix_clk_mhz = timing->pix_clk_100hz / 10000;

	// If this is true, can't block due to dynamic ODM
	if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
		return false;

	switch (link_settings->link_rate) {
	case LINK_RATE_LOW:
		symclk_mhz = 162;
		break;
	case LINK_RATE_HIGH:
		symclk_mhz = 270;
		break;
	case LINK_RATE_HIGH2:
		symclk_mhz = 540;
		break;
	case LINK_RATE_HIGH3:
		symclk_mhz = 810;
		break;
	default:
		// We shouldn't be tunneling any other rates, something is wrong
		ASSERT(0);
		return false;
	}

	avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
		/ (symclk_mhz * link_settings->lane_count);

	// Add small empirically-decided margin to account for potential jitter
	return (avg_pix_per_tu_x1000 < 2020);
}

bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
		return false;

	if (should_avoid_empty_tu(pipe_ctx))
		return false;

	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
			dc->debug.enable_dp_dig_pixel_rate_div_policy)
		return true;

	return false;
}

/*
 * Set powerup to true for every pipe to match pre-OS configuration.
 */
static void dcn35_calc_blocks_to_ungate_for_hw_release(struct dc *dc, struct pg_block_update *update_state)
{
	int i = 0, j = 0;

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
			update_state->pg_pipe_res_update[j][i] = true;

	update_state->pg_res_update[PG_HPO] = true;
	update_state->pg_res_update[PG_DWB] = true;
}

/*
 * The purpose is to power up all gatings to restore optimization to pre-OS env.
 * Re-use hwss func and existing PG&RCG flags to decide powerup sequence.
 */
void dcn35_hardware_release(struct dc *dc)
{
	struct pg_block_update pg_update_state;

	dcn35_calc_blocks_to_ungate_for_hw_release(dc, &pg_update_state);

	if (dc->hwss.root_clock_control)
		dc->hwss.root_clock_control(dc, &pg_update_state, true);
	/* power up required HW blocks */
	if (dc->hwss.hw_block_power_up)
		dc->hwss.hw_block_power_up(dc, &pg_update_state);
}