1 // SPDX-License-Identifier: MIT 2 // 3 // Copyright 2024 Advanced Micro Devices, Inc. 4 5 #include "dm_services.h" 6 #include "basics/dc_common.h" 7 #include "dm_helpers.h" 8 #include "core_types.h" 9 #include "resource.h" 10 #include "dccg.h" 11 #include "dce/dce_hwseq.h" 12 #include "reg_helper.h" 13 #include "abm.h" 14 #include "hubp.h" 15 #include "dchubbub.h" 16 #include "timing_generator.h" 17 #include "opp.h" 18 #include "ipp.h" 19 #include "mpc.h" 20 #include "mcif_wb.h" 21 #include "dc_dmub_srv.h" 22 #include "link_hwss.h" 23 #include "dpcd_defs.h" 24 #include "clk_mgr.h" 25 #include "dsc.h" 26 #include "link.h" 27 28 #include "dce/dmub_hw_lock_mgr.h" 29 #include "dcn10/dcn10_cm_common.h" 30 #include "dcn20/dcn20_optc.h" 31 #include "dcn30/dcn30_cm_common.h" 32 #include "dcn32/dcn32_hwseq.h" 33 #include "dcn401_hwseq.h" 34 #include "dcn401/dcn401_resource.h" 35 #include "dc_state_priv.h" 36 #include "link_enc_cfg.h" 37 38 #define DC_LOGGER_INIT(logger) 39 40 #define CTX \ 41 hws->ctx 42 #define REG(reg)\ 43 hws->regs->reg 44 #define DC_LOGGER \ 45 dc->ctx->logger 46 47 48 #undef FN 49 #define FN(reg_name, field_name) \ 50 hws->shifts->field_name, hws->masks->field_name 51 52 static void dcn401_initialize_min_clocks(struct dc *dc) 53 { 54 struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; 55 56 clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ; 57 clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000; 58 clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; 59 clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; 60 clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; 61 if (dc->debug.disable_boot_optimizations) { 62 clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; 63 } else { 64 /* Even though DPG_EN = 1 for the connected display, it still requires the 65 * correct timing 
so we cannot set DISPCLK to min freq or it could cause 66 * audio corruption. Read current DISPCLK from DENTIST and request the same 67 * freq to ensure that the timing is valid and unchanged. 68 */ 69 clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr); 70 } 71 clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; 72 clocks->fclk_p_state_change_support = true; 73 clocks->p_state_change_support = true; 74 75 dc->clk_mgr->funcs->update_clocks( 76 dc->clk_mgr, 77 dc->current_state, 78 true); 79 } 80 81 void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx) 82 { 83 unsigned int i = 0; 84 struct mpc_grph_gamut_adjustment mpc_adjust; 85 unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst; 86 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 87 88 //For now assert if location is not pre-blend 89 if (pipe_ctx->plane_state) 90 ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE); 91 92 // program MPCC_MCM_FIRST_GAMUT_REMAP 93 memset(&mpc_adjust, 0, sizeof(mpc_adjust)); 94 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 95 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP; 96 97 if (pipe_ctx->plane_state && 98 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) { 99 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 100 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) 101 mpc_adjust.temperature_matrix[i] = 102 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i]; 103 } 104 105 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust); 106 107 // program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now 108 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 109 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP; 110 111 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust); 112 113 // program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x 114 memset(&mpc_adjust, 
0, sizeof(mpc_adjust)); 115 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 116 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP; 117 118 if (pipe_ctx->top_pipe == NULL) { 119 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { 120 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 121 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) 122 mpc_adjust.temperature_matrix[i] = 123 pipe_ctx->stream->gamut_remap_matrix.matrix[i]; 124 } 125 } 126 127 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust); 128 } 129 130 void dcn401_init_hw(struct dc *dc) 131 { 132 struct abm **abms = dc->res_pool->multiple_abms; 133 struct dce_hwseq *hws = dc->hwseq; 134 struct dc_bios *dcb = dc->ctx->dc_bios; 135 struct resource_pool *res_pool = dc->res_pool; 136 int i; 137 int edp_num; 138 uint32_t backlight = MAX_BACKLIGHT_LEVEL; 139 uint32_t user_level = MAX_BACKLIGHT_LEVEL; 140 int current_dchub_ref_freq = 0; 141 142 if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) { 143 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); 144 145 // mark dcmode limits present if any clock has distinct AC and DC values from SMU 146 dc->caps.dcmode_power_limits_present = 147 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) || 148 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) || 149 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) || 150 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) || 151 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) || 152 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && 
dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz); 153 } 154 155 // Initialize the dccg 156 if (res_pool->dccg->funcs->dccg_init) 157 res_pool->dccg->funcs->dccg_init(res_pool->dccg); 158 159 // Disable DMUB Initialization until IPS state programming is finalized 160 //if (!dcb->funcs->is_accelerated_mode(dcb)) { 161 // hws->funcs.bios_golden_init(dc); 162 //} 163 164 // Set default OPTC memory power states 165 if (dc->debug.enable_mem_low_power.bits.optc) { 166 // Shutdown when unassigned and light sleep in VBLANK 167 REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); 168 } 169 170 if (dc->debug.enable_mem_low_power.bits.vga) { 171 // Power down VGA memory 172 REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); 173 } 174 175 if (dc->ctx->dc_bios->fw_info_valid) { 176 res_pool->ref_clocks.xtalin_clock_inKhz = 177 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; 178 179 if (res_pool->hubbub) { 180 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, 181 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, 182 &res_pool->ref_clocks.dccg_ref_clock_inKhz); 183 184 current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; 185 186 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, 187 res_pool->ref_clocks.dccg_ref_clock_inKhz, 188 &res_pool->ref_clocks.dchub_ref_clock_inKhz); 189 } else { 190 // Not all ASICs have DCCG sw component 191 res_pool->ref_clocks.dccg_ref_clock_inKhz = 192 res_pool->ref_clocks.xtalin_clock_inKhz; 193 res_pool->ref_clocks.dchub_ref_clock_inKhz = 194 res_pool->ref_clocks.xtalin_clock_inKhz; 195 } 196 } else 197 ASSERT_CRITICAL(false); 198 199 for (i = 0; i < dc->link_count; i++) { 200 /* Power up AND update implementation according to the 201 * required signal (which may be different from the 202 * default signal on connector). 
203 */ 204 struct dc_link *link = dc->links[i]; 205 206 link->link_enc->funcs->hw_init(link->link_enc); 207 208 /* Check for enabled DIG to identify enabled display */ 209 if (link->link_enc->funcs->is_dig_enabled && 210 link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 211 link->link_status.link_active = true; 212 link->phy_state.symclk_state = SYMCLK_ON_TX_ON; 213 if (link->link_enc->funcs->fec_is_active && 214 link->link_enc->funcs->fec_is_active(link->link_enc)) 215 link->fec_state = dc_link_fec_enabled; 216 } 217 } 218 219 /* enable_power_gating_plane before dsc_pg_control because 220 * FORCEON = 1 with hw default value on bootup, resume from s3 221 */ 222 if (hws->funcs.enable_power_gating_plane) 223 hws->funcs.enable_power_gating_plane(dc->hwseq, true); 224 225 /* we want to turn off all dp displays before doing detection */ 226 dc->link_srv->blank_all_dp_displays(dc); 227 228 /* If taking control over from VBIOS, we may want to optimize our first 229 * mode set, so we need to skip powering down pipes until we know which 230 * pipes we want to use. 231 * Otherwise, if taking control is not possible, we need to power 232 * everything down. 233 */ 234 if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { 235 /* Disable boot optimizations means power down everything including PHY, DIG, 236 * and OTG (i.e. the boot is not optimized because we do a full power down). 237 */ 238 if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations) 239 dc->hwss.enable_accelerated_mode(dc, dc->current_state); 240 else 241 hws->funcs.init_pipes(dc, dc->current_state); 242 243 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) 244 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, 245 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); 246 247 dcn401_initialize_min_clocks(dc); 248 249 /* On HW init, allow idle optimizations after pipes have been turned off. 
250 * 251 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state 252 * is reset (i.e. not in idle at the time hw init is called), but software state 253 * still has idle_optimizations = true, so we must disable idle optimizations first 254 * (i.e. set false), then re-enable (set true). 255 */ 256 dc_allow_idle_optimizations(dc, false); 257 dc_allow_idle_optimizations(dc, true); 258 } 259 260 /* In headless boot cases, DIG may be turned 261 * on which causes HW/SW discrepancies. 262 * To avoid this, power down hardware on boot 263 * if DIG is turned on and seamless boot not enabled 264 */ 265 if (!dc->config.seamless_boot_edp_requested) { 266 struct dc_link *edp_links[MAX_NUM_EDP]; 267 struct dc_link *edp_link; 268 269 dc_get_edp_links(dc, edp_links, &edp_num); 270 if (edp_num) { 271 for (i = 0; i < edp_num; i++) { 272 edp_link = edp_links[i]; 273 if (edp_link->link_enc->funcs->is_dig_enabled && 274 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && 275 dc->hwss.edp_backlight_control && 276 hws->funcs.power_down && 277 dc->hwss.edp_power_control) { 278 dc->hwss.edp_backlight_control(edp_link, false); 279 hws->funcs.power_down(dc); 280 dc->hwss.edp_power_control(edp_link, false); 281 } 282 } 283 } else { 284 for (i = 0; i < dc->link_count; i++) { 285 struct dc_link *link = dc->links[i]; 286 287 if (link->link_enc->funcs->is_dig_enabled && 288 link->link_enc->funcs->is_dig_enabled(link->link_enc) && 289 hws->funcs.power_down) { 290 hws->funcs.power_down(dc); 291 break; 292 } 293 294 } 295 } 296 } 297 298 for (i = 0; i < res_pool->audio_count; i++) { 299 struct audio *audio = res_pool->audios[i]; 300 301 audio->funcs->hw_init(audio); 302 } 303 304 for (i = 0; i < dc->link_count; i++) { 305 struct dc_link *link = dc->links[i]; 306 307 if (link->panel_cntl) { 308 backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); 309 user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; 310 } 311 } 312 313 for (i = 0; i < 
dc->res_pool->pipe_count; i++) { 314 if (abms[i] != NULL && abms[i]->funcs != NULL) 315 abms[i]->funcs->abm_init(abms[i], backlight, user_level); 316 } 317 318 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 319 REG_WRITE(DIO_MEM_PWR_CTRL, 0); 320 321 if (!dc->debug.disable_clock_gate) { 322 /* enable all DCN clock gating */ 323 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 324 325 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 326 327 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 328 } 329 330 dcn401_setup_hpo_hw_control(hws, true); 331 332 if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) 333 dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); 334 335 if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges) 336 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); 337 338 if (dc->res_pool->hubbub->funcs->force_pstate_change_control) 339 dc->res_pool->hubbub->funcs->force_pstate_change_control( 340 dc->res_pool->hubbub, false, false); 341 342 if (dc->res_pool->hubbub->funcs->init_crb) 343 dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); 344 345 if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0) 346 dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc); 347 348 // Get DMCUB capabilities 349 if (dc->ctx->dmub_srv) { 350 dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); 351 dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; 352 dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0; 353 dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; 354 dc->debug.fams2_config.bits.enable &= 355 dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support 356 if 
((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box) 357 || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) { 358 /* update bounding box if FAMS2 disabled, or if dchub clk has changed */ 359 if (dc->clk_mgr) 360 dc->res_pool->funcs->update_bw_bounding_box(dc, 361 dc->clk_mgr->bw_params); 362 } 363 } 364 } 365 366 static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx, 367 enum MCM_LUT_XABLE *shaper_xable, 368 enum MCM_LUT_XABLE *lut3d_xable, 369 enum MCM_LUT_XABLE *lut1d_xable) 370 { 371 enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL; 372 bool lut1d_enable = false; 373 struct mpc *mpc = dc->res_pool->mpc; 374 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 375 376 if (!pipe_ctx->plane_state) 377 return; 378 shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting; 379 lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable; 380 mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); 381 pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE; 382 383 *lut1d_xable = lut1d_enable ? 
MCM_LUT_ENABLE : MCM_LUT_DISABLE; 384 385 switch (shaper_3dlut_setting) { 386 case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL: 387 *lut3d_xable = *shaper_xable = MCM_LUT_DISABLE; 388 break; 389 case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER: 390 *lut3d_xable = MCM_LUT_DISABLE; 391 *shaper_xable = MCM_LUT_ENABLE; 392 break; 393 case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT: 394 *lut3d_xable = *shaper_xable = MCM_LUT_ENABLE; 395 break; 396 } 397 } 398 399 void dcn401_populate_mcm_luts(struct dc *dc, 400 struct pipe_ctx *pipe_ctx, 401 struct dc_cm2_func_luts mcm_luts, 402 bool lut_bank_a) 403 { 404 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 405 struct hubp *hubp = pipe_ctx->plane_res.hubp; 406 int mpcc_id = hubp->inst; 407 struct mpc *mpc = dc->res_pool->mpc; 408 union mcm_lut_params m_lut_params; 409 enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src; 410 enum hubp_3dlut_fl_format format; 411 enum hubp_3dlut_fl_mode mode; 412 enum hubp_3dlut_fl_width width; 413 enum hubp_3dlut_fl_addressing_mode addr_mode; 414 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g; 415 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b; 416 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r; 417 enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE; 418 enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE; 419 enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE; 420 bool is_17x17x17 = true; 421 bool rval; 422 423 dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); 424 425 /* 1D LUT */ 426 if (mcm_luts.lut1d_func) { 427 memset(&m_lut_params, 0, sizeof(m_lut_params)); 428 if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) 429 m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; 430 else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) { 431 rval = cm3_helper_translate_curve_to_hw_format( 432 mcm_luts.lut1d_func, 433 &dpp_base->regamma_params, false); 434 m_lut_params.pwl = rval ? 
&dpp_base->regamma_params : NULL; 435 } 436 if (m_lut_params.pwl) { 437 if (mpc->funcs->populate_lut) 438 mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id); 439 } 440 if (mpc->funcs->program_lut_mode) 441 mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id); 442 } 443 444 /* Shaper */ 445 if (mcm_luts.shaper) { 446 memset(&m_lut_params, 0, sizeof(m_lut_params)); 447 if (mcm_luts.shaper->type == TF_TYPE_HWPWL) 448 m_lut_params.pwl = &mcm_luts.shaper->pwl; 449 else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { 450 ASSERT(false); 451 rval = cm3_helper_translate_curve_to_hw_format( 452 mcm_luts.shaper, 453 &dpp_base->regamma_params, true); 454 m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; 455 } 456 if (m_lut_params.pwl) { 457 if (mpc->funcs->populate_lut) 458 mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id); 459 } 460 if (mpc->funcs->program_lut_mode) 461 mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id); 462 } 463 464 /* 3DLUT */ 465 switch (lut3d_src) { 466 case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: 467 memset(&m_lut_params, 0, sizeof(m_lut_params)); 468 if (hubp->funcs->hubp_enable_3dlut_fl) 469 hubp->funcs->hubp_enable_3dlut_fl(hubp, false); 470 if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) { 471 m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d; 472 if (mpc->funcs->populate_lut) 473 mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id); 474 if (mpc->funcs->program_lut_mode) 475 mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, 476 mpcc_id); 477 } 478 break; 479 case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: 480 481 if (mpc->funcs->program_lut_read_write_control) 482 mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id); 483 if (mpc->funcs->program_lut_mode) 484 
mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id); 485 if (mpc->funcs->program_3dlut_size) 486 mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id); 487 if (hubp->funcs->hubp_program_3dlut_fl_addr) 488 hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr); 489 switch (mcm_luts.lut3d_data.gpu_mem_params.layout) { 490 case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: 491 mode = hubp_3dlut_fl_mode_native_1; 492 addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 493 break; 494 case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR: 495 mode = hubp_3dlut_fl_mode_native_2; 496 addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 497 break; 498 case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR: 499 mode = hubp_3dlut_fl_mode_transform; 500 addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear; 501 break; 502 default: 503 mode = hubp_3dlut_fl_mode_disable; 504 addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 505 break; 506 } 507 if (hubp->funcs->hubp_program_3dlut_fl_mode) 508 hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode); 509 510 if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode) 511 hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode); 512 513 switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) { 514 case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB: 515 default: 516 format = hubp_3dlut_fl_format_unorm_12msb_bitslice; 517 break; 518 case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB: 519 format = hubp_3dlut_fl_format_unorm_12lsb_bitslice; 520 break; 521 case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10: 522 format = hubp_3dlut_fl_format_float_fp1_5_10; 523 break; 524 } 525 if (hubp->funcs->hubp_program_3dlut_fl_format) 526 hubp->funcs->hubp_program_3dlut_fl_format(hubp, format); 527 if (hubp->funcs->hubp_update_3dlut_fl_bias_scale) 528 hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp, 529 mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias, 530 
mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale); 531 532 switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) { 533 case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: 534 default: 535 crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15; 536 crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; 537 crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; 538 break; 539 } 540 541 if (hubp->funcs->hubp_program_3dlut_fl_crossbar) 542 hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, 543 crossbar_bit_slice_y_g, 544 crossbar_bit_slice_cb_b, 545 crossbar_bit_slice_cr_r); 546 547 switch (mcm_luts.lut3d_data.gpu_mem_params.size) { 548 case DC_CM2_GPU_MEM_SIZE_171717: 549 default: 550 width = hubp_3dlut_fl_width_17; 551 break; 552 case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: 553 width = hubp_3dlut_fl_width_transformed; 554 break; 555 } 556 if (hubp->funcs->hubp_program_3dlut_fl_width) 557 hubp->funcs->hubp_program_3dlut_fl_width(hubp, width); 558 if (mpc->funcs->update_3dlut_fast_load_select) 559 mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst); 560 561 if (hubp->funcs->hubp_enable_3dlut_fl) 562 hubp->funcs->hubp_enable_3dlut_fl(hubp, true); 563 else { 564 if (mpc->funcs->program_lut_mode) { 565 mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id); 566 mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id); 567 mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id); 568 } 569 } 570 break; 571 572 } 573 } 574 575 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx) 576 { 577 struct hubp *hubp = pipe_ctx->plane_res.hubp; 578 579 if (hubp->funcs->hubp_enable_3dlut_fl) { 580 hubp->funcs->hubp_enable_3dlut_fl(hubp, true); 581 } 582 } 583 584 bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx, 585 const struct dc_plane_state *plane_state) 586 { 587 struct dpp *dpp_base = 
pipe_ctx->plane_res.dpp; 588 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 589 struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc; 590 struct mpc *mpc = dc->res_pool->mpc; 591 bool result; 592 const struct pwl_params *lut_params = NULL; 593 bool rval; 594 595 if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { 596 dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a); 597 return true; 598 } 599 600 mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); 601 pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE; 602 // 1D LUT 603 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 604 lut_params = &plane_state->blend_tf.pwl; 605 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 606 rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, 607 &dpp_base->regamma_params, false); 608 lut_params = rval ? &dpp_base->regamma_params : NULL; 609 } 610 result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); 611 lut_params = NULL; 612 613 // Shaper 614 if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) 615 lut_params = &plane_state->in_shaper_func.pwl; 616 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { 617 // TODO: dpp_base replace 618 rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, 619 &dpp_base->shaper_params, true); 620 lut_params = rval ? 
&dpp_base->shaper_params : NULL; 621 } 622 result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); 623 624 // 3D 625 if (mpc->funcs->program_3dlut) { 626 if (plane_state->lut3d_func.state.bits.initialized == 1) 627 result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id); 628 else 629 result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); 630 } 631 632 return result; 633 } 634 635 bool dcn401_set_output_transfer_func(struct dc *dc, 636 struct pipe_ctx *pipe_ctx, 637 const struct dc_stream_state *stream) 638 { 639 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 640 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 641 const struct pwl_params *params = NULL; 642 bool ret = false; 643 644 /* program OGAM or 3DLUT only for the top pipe*/ 645 if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { 646 /*program shaper and 3dlut in MPC*/ 647 ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); 648 if (ret == false && mpc->funcs->set_output_gamma) { 649 if (stream->out_transfer_func.type == TF_TYPE_HWPWL) 650 params = &stream->out_transfer_func.pwl; 651 else if (pipe_ctx->stream->out_transfer_func.type == 652 TF_TYPE_DISTRIBUTED_POINTS && 653 cm3_helper_translate_curve_to_hw_format( 654 &stream->out_transfer_func, 655 &mpc->blender_params, false)) 656 params = &mpc->blender_params; 657 /* there are no ROM LUTs in OUTGAM */ 658 if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) 659 BREAK_TO_DEBUGGER(); 660 } 661 } 662 663 if (mpc->funcs->set_output_gamma) 664 mpc->funcs->set_output_gamma(mpc, mpcc_id, params); 665 666 return ret; 667 } 668 669 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx, 670 unsigned int *tmds_div) 671 { 672 struct dc_stream_state *stream = pipe_ctx->stream; 673 674 if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { 675 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) 676 *tmds_div = PIXEL_RATE_DIV_BY_2; 677 else 678 *tmds_div = 
PIXEL_RATE_DIV_BY_4; 679 } else { 680 *tmds_div = PIXEL_RATE_DIV_BY_1; 681 } 682 683 if (*tmds_div == PIXEL_RATE_DIV_NA) 684 ASSERT(false); 685 686 } 687 688 static void enable_stream_timing_calc( 689 struct pipe_ctx *pipe_ctx, 690 struct dc_state *context, 691 struct dc *dc, 692 unsigned int *tmds_div, 693 int *opp_inst, 694 int *opp_cnt, 695 struct pipe_ctx *opp_heads[MAX_PIPES], 696 bool *manual_mode, 697 struct drr_params *params, 698 unsigned int *event_triggers) 699 { 700 struct dc_stream_state *stream = pipe_ctx->stream; 701 int i; 702 703 if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) 704 dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div); 705 706 *opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads); 707 for (i = 0; i < *opp_cnt; i++) 708 opp_inst[i] = opp_heads[i]->stream_res.opp->inst; 709 710 if (dc_is_tmds_signal(stream->signal)) { 711 stream->link->phy_state.symclk_ref_cnts.otg = 1; 712 if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) 713 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; 714 else 715 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON; 716 } 717 718 params->vertical_total_min = stream->adjust.v_total_min; 719 params->vertical_total_max = stream->adjust.v_total_max; 720 params->vertical_total_mid = stream->adjust.v_total_mid; 721 params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num; 722 723 // DRR should set trigger event to monitor surface update event 724 if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) 725 *event_triggers = 0x80; 726 } 727 728 enum dc_status dcn401_enable_stream_timing( 729 struct pipe_ctx *pipe_ctx, 730 struct dc_state *context, 731 struct dc *dc) 732 { 733 struct dce_hwseq *hws = dc->hwseq; 734 struct dc_stream_state *stream = pipe_ctx->stream; 735 struct drr_params params = {0}; 736 unsigned int event_triggers = 0; 737 int opp_cnt = 1; 738 int opp_inst[MAX_PIPES] = {0}; 
739 struct pipe_ctx *opp_heads[MAX_PIPES] = {0}; 740 struct dc_crtc_timing patched_crtc_timing = stream->timing; 741 bool manual_mode = false; 742 unsigned int tmds_div = PIXEL_RATE_DIV_NA; 743 unsigned int unused_div = PIXEL_RATE_DIV_NA; 744 int odm_slice_width; 745 int last_odm_slice_width; 746 int i; 747 748 if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) 749 return DC_OK; 750 751 enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst, 752 &opp_cnt, opp_heads, &manual_mode, ¶ms, &event_triggers); 753 754 if (dc->res_pool->dccg->funcs->set_pixel_rate_div) { 755 dc->res_pool->dccg->funcs->set_pixel_rate_div( 756 dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst, 757 tmds_div, unused_div); 758 } 759 760 /* TODO check if timing_changed, disable stream if timing changed */ 761 762 if (opp_cnt > 1) { 763 odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false); 764 last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true); 765 pipe_ctx->stream_res.tg->funcs->set_odm_combine( 766 pipe_ctx->stream_res.tg, 767 opp_inst, opp_cnt, 768 odm_slice_width, last_odm_slice_width); 769 } 770 771 /* set DTBCLK_P */ 772 if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) { 773 if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { 774 dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst); 775 } 776 } 777 778 /* HW program guide assume display already disable 779 * by unplug sequence. OTG assume stop. 
780 */ 781 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true); 782 783 if (false == pipe_ctx->clock_source->funcs->program_pix_clk( 784 pipe_ctx->clock_source, 785 &pipe_ctx->stream_res.pix_clk_params, 786 dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), 787 &pipe_ctx->pll_settings)) { 788 BREAK_TO_DEBUGGER(); 789 return DC_ERROR_UNEXPECTED; 790 } 791 792 if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) 793 dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); 794 795 /* if we are borrowing from hblank, h_addressable needs to be adjusted */ 796 if (dc->debug.enable_hblank_borrow) 797 patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow; 798 799 pipe_ctx->stream_res.tg->funcs->program_timing( 800 pipe_ctx->stream_res.tg, 801 &patched_crtc_timing, 802 (unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels, 803 (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines, 804 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels, 805 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels, 806 (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines, 807 pipe_ctx->stream->signal, 808 true); 809 810 for (i = 0; i < opp_cnt; i++) { 811 opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control( 812 opp_heads[i]->stream_res.opp, 813 true); 814 opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel( 815 opp_heads[i]->stream_res.opp, 816 stream->timing.pixel_encoding, 817 resource_is_pipe_type(opp_heads[i], OTG_MASTER)); 818 } 819 820 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( 821 pipe_ctx->stream_res.opp, 822 true); 823 824 hws->funcs.blank_pixel_data(dc, pipe_ctx, true); 825 826 /* VTG is within DCHUB command block. 
 * DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);

	/* Event triggers and num frames initialized for DRR, but can be
	 * later updated for PSR use. Note DRR trigger events are generated
	 * regardless of whether num frames met.
	 */
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers, 2);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	/* Phantom (SubVP) OTG needs its dedicated post-enable sequence. */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
		if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
			pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
	}

	return DC_OK;
}

/* Map a DIO transmitter instance (UNIPHY A..E) to the matching PHYD32CLK
 * source. Unknown transmitters fall back to PHYD32CLKA.
 */
static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
{
	switch (link->link_enc->transmitter) {
	case TRANSMITTER_UNIPHY_A:
		return PHYD32CLKA;
	case TRANSMITTER_UNIPHY_B:
		return PHYD32CLKB;
	case TRANSMITTER_UNIPHY_C:
		return PHYD32CLKC;
	case TRANSMITTER_UNIPHY_D:
		return PHYD32CLKD;
	case TRANSMITTER_UNIPHY_E:
		return PHYD32CLKE;
	default:
		return PHYD32CLKA;
	}
}

/* Derive the clocking parameters consumed by dcn401_enable_stream():
 * HPO stream encoder instance (128b/132b links only), PHYD32CLK source,
 * TMDS pixel-rate divider, and the DIO "early control" value
 * (active width modulo lane count, never programmed as zero).
 */
static void dcn401_enable_stream_calc(
		struct pipe_ctx *pipe_ctx,
		int *dp_hpo_inst,
		enum phyd32clk_clock_source *phyd32clk,
		unsigned int *tmds_div,
		uint32_t *early_control)
{

	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	enum dc_lane_count lane_count =
			pipe_ctx->stream->link->cur_link_settings.lane_count;
	uint32_t active_total_with_borders;

	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;

	*phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);

	if (dc_is_tmds_signal(pipe_ctx->stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
	else
		*tmds_div = PIXEL_RATE_DIV_BY_1;

	/* enable early control to avoid corruption on DP monitor*/
	active_total_with_borders =
			timing->h_addressable
				+ timing->h_border_left
				+ timing->h_border_right;

	/* A zero remainder is replaced with lane_count so the value
	 * written to the OTG is never 0.
	 */
	if (lane_count != 0)
		*early_control = active_total_with_borders % lane_count;

	if (*early_control == 0)
		*early_control = lane_count;

}

/* Front-end stream enable: select stream/SYMCLK sources in DCCG,
 * program the pixel-rate divider, set up the stream encoder and info
 * frames, then write the DIO early-control value.
 */
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
{
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;
	enum phyd32clk_clock_source phyd32clk;
	int dp_hpo_inst = 0;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	/* Without unified link encoder assignment, resolve it from config. */
	if (!dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);

	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
			&tmds_div, &early_control);

	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
			/* Unknown link rate: SYMCLK32 SE cannot be sourced yet. */
			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
			} else {
				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
			}
		} else {
			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
		}
	}

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg,
			pipe_ctx->stream_res.tg->inst,
			tmds_div,
			unused_div);
	}

	link_hwss->setup_stream_encoder(pipe_ctx);

	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (dc->hwss.program_dmdata_engine)
			dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	tg->funcs->set_early_control(tg, early_control);
}

/* Gate the HPO (high pixel-rate output) IO block on or off. */
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}

/* With 2x magnification the X hotspot is halved; the +1/+2 offset
 * compensates at the slice boundary depending on cursor width.
 */
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
	if (cursor_width <= 128) {
		pos_cpy->x_hotspot /= 2;
		pos_cpy->x_hotspot += 1;
	} else {
		pos_cpy->x_hotspot /= 2;
		pos_cpy->x_hotspot += 2;
	}
}

/* Re-program the pixel clock on the top (OTG master) pipe driven by
 * @link so SYMCLK can keep running while the transmitter is off
 * (see SYMCLK_ON_TX_OFF handling in dcn401_disable_link_output()).
 */
static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
{
	struct dc *dc = link->ctx->dc;
	struct pipe_ctx *pipe_ctx = NULL;
	uint8_t i;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
			pipe_ctx->clock_source->funcs->program_pix_clk(
				pipe_ctx->clock_source,
				&pipe_ctx->stream_res.pix_clk_params,
				link_encoding,
				&pipe_ctx->pll_settings);
			break;
		}
	}
}

/* Disable PHY output for @link. eDP turns its backlight off first;
 * other signals take the DMCU PHY lock around the disable. TMDS links
 * that still have an active OTG keep SYMCLK running (SYMCLK_ON_TX_OFF)
 * instead of fully disabling the output.
 */
void dcn401_disable_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_backlight_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->lock_phy(dmcu);

	if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
		disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
		link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
	} else {
		link_hwss->disable_link_output(link, link_res, signal);
		link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
	}

	/* NOTE(review): this branch tests edp_backlight_control but calls
	 * edp_power_control - looks intentional (power follows backlight
	 * capability), but worth confirming.
	 */
	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_power_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->unlock_phy(dmcu);

	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}

/* Translate the stream-space cursor position into per-pipe recout
 * space and program HUBP and DPP. Handles ODM and MPC pipe splits,
 * hotspot compensation at negative coordinates, and visibility culling
 * at the recout edges.
 */
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.recout = pipe_ctx->plane_res.scl_data.recout,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream
	};
	struct rect odm_slice_src = { 0 };
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int prev_odm_width = 0;
	struct pipe_ctx *prev_odm_pipe = NULL;
	bool mpc_combine_on = false;
	int bottom_pipe_x_pos = 0;

	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	int recout_x_pos = 0;
	int recout_y_pos = 0;

	/* Scaled MPC combine: viewport differs from the plane src rect. */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			mpc_combine_on = true;
		}
	}

	/* DCN4 moved cursor composition after Scaler, so in HW it is in
	 * recout space and for HW Cursor position programming need to
	 * translate to recout space.
	 *
	 * Cursor X and Y position programmed into HW can't be negative,
	 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot
	 * position that goes into HW X and Y coordinates while HW Hot spot
	 * X and Y coordinates are length relative to the cursor top left
	 * corner, hotspot must be smaller than the cursor size.
	 *
	 * DMs/DC interface for Cursor position is in stream->src space, and
	 * DMs supposed to transform Cursor coordinates to stream->src space,
	 * then here we need to translate Cursor coordinates to stream->dst
	 * space, as now in HW, Cursor coordinates are in per pipe recout
	 * space, and for the given pipe valid coordinates are only in range
	 * from 0,0 - recout width, recout height space.
	 * If certain pipe combining is in place, need to further adjust per
	 * pipe to make sure each pipe enabling cursor on its part of the
	 * screen.
	 */
	x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
		pipe_ctx->stream->src.width;
	y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
		pipe_ctx->stream->src.height;

	/* If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/* Adjust for ODM Combine
	 * next/prev_odm_offset is to account for scaled modes that have underscan
	 */
	if (odm_combine_on) {
		prev_odm_pipe = pipe_ctx->prev_odm_pipe;

		/* Subtract the width of every ODM slice left of this pipe. */
		while (prev_odm_pipe != NULL) {
			odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
			prev_odm_width += odm_slice_src.width;
			prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
		}

		x_pos -= (prev_odm_width);
	}

	/* If the position is negative then we need to add to the hotspot
	 * to fix cursor size between ODM slices
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* If the position on bottom MPC pipe is negative then we need to add to the hotspot and
	 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
	 */
	if (mpc_combine_on &&
		pipe_ctx->top_pipe &&
		(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {

		bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
		if (bottom_pipe_x_pos < 0) {
			x_pos = pipe_ctx->plane_res.scl_data.recout.x;
			pos_cpy.x_hotspot -= bottom_pipe_x_pos;
			if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
				adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		}
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	/* Convert to recout-relative coordinates for visibility checks. */
	x_pos = pos_cpy.x - param.recout.x;
	y_pos = pos_cpy.y - param.recout.y;

	recout_x_pos = x_pos - pos_cpy.x_hotspot;
	recout_y_pos = y_pos - pos_cpy.y_hotspot;

	if (recout_x_pos >= (int)param.recout.width)
		pos_cpy.enable = false;  /* not visible beyond right edge*/

	if (recout_y_pos >= (int)param.recout.height)
		pos_cpy.enable = false;  /* not visible beyond bottom edge*/

	if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
		pos_cpy.enable = false;  /* not visible beyond left edge*/

	if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
		pos_cpy.enable = false;  /* not visible beyond top edge*/

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}

/* Return true only when no stream would actively fetch from memory:
 * every stream must either have no planes or a PSR-capable link.
 */
static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
{
	int i;

	/* First, check no-memory-request case */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		if ((dc->current_state->stream_status[i].plane_count) &&
				(dc->current_state->streams[i]->link->psr_settings.psr_version ==
				DC_PSR_VERSION_UNSUPPORTED))
			/* Fail eligibility on a visible stream */
			return false;
	}

	return true;
}

/* Compute the number of MALL cache ways needed for self-refresh:
 * the precomputed MALL SS size plus cursor surfaces, converted to
 * ways by the resource pool helper (or forced via debug option).
 */
static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
	int i;
	uint8_t num_ways = 0;
	uint32_t mall_ss_size_bytes = 0;

	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
	// TODO add additional logic for PSR active stream exclusion optimization
	// mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;

	// Include cursor size for CAB allocation
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];

		if (!pipe->stream || !pipe->plane_state)
			continue;

		mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
	}

	// Convert number of cache lines required to number of ways
	if (dc->debug.force_mall_ss_num_ways > 0)
		num_ways = dc->debug.force_mall_ss_num_ways;
	else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
		num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
	else
		num_ways = 0;

	return num_ways;
}

/* Send the DMUB CAB (cache-as-buffer) command that enables or disables
 * MALL-based idle power optimization. Returns false when the request
 * cannot be issued (no DMUB service, or PSR enabled on any stream).
 */
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t ways, i;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv || !dc->current_state)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
			return false;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

	if (enable) {
		if (dcn401_check_no_memory_request_for_cab(dc)) {
			/* 1. Check no memory request case for CAB.
			 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
			 */
			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
		} else {
			/* 2. Check if all surfaces can fit in CAB.
			 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
			 * and configure HUBP's to fetch from MALL
			 */
			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);

			/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
			 * or TMZ surface, don't try to enter MALL.
			 */
			for (i = 0; i < dc->current_state->stream_count; i++) {
				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
					plane = dc->current_state->stream_status[i].plane_states[j];

					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
							plane->address.tmz_surface) {
						mall_ss_unsupported = true;
						break;
					}
				}
				if (mall_ss_unsupported)
					break;
			}
			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
				cmd.cab.cab_alloc_ways = ways;
				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
			} else {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
			}
		}
	} else {
		/* Disable CAB */
		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
		DC_LOG_MALL("idle optimization disabled\n");
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

/* Delay after an address update when any pipe in the blend tree flips
 * immediately with DCC enabled, giving DCC metadata time to propagate.
 */
void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
		const struct pipe_ctx *top_pipe)
{
	bool is_wait_needed = false;
	const struct pipe_ctx *pipe_ctx = top_pipe;

	/* check if any surfaces are updating address while using flip immediate and dcc */
	while (pipe_ctx != NULL) {
		if (pipe_ctx->plane_state &&
				pipe_ctx->plane_state->dcc.enable &&
				pipe_ctx->plane_state->flip_immediate &&
				pipe_ctx->plane_state->update_flags.bits.addr_update) {
			is_wait_needed = true;
			break;
		}

		/* check next pipe */
		pipe_ctx = pipe_ctx->bottom_pipe;
	}

	if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
		udelay(dc->debug.dcc_meta_propagation_delay_us);
	}
}

/* Pre-update bandwidth sequence: temporarily force P-State switching
 * off, raise clocks, program pessimistic watermarks/arbiter settings,
 * shrink compbuf, and refresh the FAMS2 config (disabled) under lock.
 */
void dcn401_prepare_bandwidth(struct dc *dc,
	struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
	unsigned int compbuf_size = 0;

	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
	if (p_state_change_support) {
		dc->optimized_required = true;
		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	}

	/* Lift the DC-mode softmax memclk cap when the new state needs
	 * more DRAM clock than the softmax allows.
	 */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr,
					dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

	/* Increase clocks */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* program dchubbub watermarks:
	 * For assigning wm_optimized_required, use |= operator since we don't want
	 * to clear the value if the optimize has not happened yet
	 */
	dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			false);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
	}

	/* decrease compbuf size */
	if (hubbub->funcs->program_compbuf_segments) {
		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
		dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);

		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
	}

	/* Refresh FAMS2 config (disabled for prepare) under the HW lock. */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, false);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
		/* After disabling P-State, restore the original value to ensure we get the correct P-State
		 * on the next optimize.
		 */
		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
	}
}

/* Post-update counterpart of dcn401_prepare_bandwidth(): re-enable
 * FAMS2, program final watermarks and arbiter settings, grow compbuf,
 * lower clocks, and program extended blank where Z-state is allowed.
 */
void dcn401_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* enable fams2 if needed */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, true);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true);
	}

	/* Re-apply the DC-mode softmax memclk cap if the new state fits. */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);

	/* increase compbuf size */
	if (hubbub->funcs->program_compbuf_segments)
		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			/* Fixed refresh (v_total_min == max) stretched past the
			 * nominal v_total qualifies for extended blank.
			 */
			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
				pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
					pipe_ctx->dlg_regs.min_dst_y_next_start);
		}
	}
}

/* Acquire or release the DMUB inbox0 HW lock used to fence FAMS2
 * global configuration updates. No-op when FAMS2 is disabled.
 */
void dcn401_fams2_global_control_lock(struct dc *dc,
		struct dc_state *context,
		bool lock)
{
	/* use always for now */
	union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
	hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
	hw_lock_cmd.bits.lock = lock;
	hw_lock_cmd.bits.should_release = !lock;
	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}

/* Block-sequence (fast path) variant of the FAMS2 global control lock;
 * only issues the DMUB command when the sequence marked it required.
 */
void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
{
	struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
	bool lock = params->fams2_global_control_lock_fast_params.lock;

	if (params->fams2_global_control_lock_fast_params.is_required) {
		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };

		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
		hw_lock_cmd.bits.lock = lock;
		hw_lock_cmd.bits.should_release = !lock;
		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
	}
}

/* Push the FAMS2 configuration to DMUB. The effective enable is the
 * caller's @enable ANDed with the state's global FAMS2 feature bit.
 */
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
{
	bool fams2_required;

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	fams2_required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;

	dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_required);
}

/* Reconfigure DSC across an ODM slice-count change: update DSC on the
 * new OTG master's stream, then disconnect DSC instances that the old
 * OPP heads held but the new state no longer uses.
 */
static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	int i;
	struct pipe_ctx *old_pipe;
	struct pipe_ctx
*new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;

	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
				&dc->current_state->res_ctx,
				old_opp_heads);
	} else {
		// DC cannot assume that the current state and the new state
		// share the same OTG pipe since this is not true when called
		// in the context of a commit stream not checked. Hence, set
		// old_otg_master to NULL to skip the DSC configuration.
		old_otg_master = NULL;
	}


	if (otg_master->stream_res.dsc)
		dcn32_update_dsc_on_stream(otg_master,
				otg_master->stream->timing.flags.DSC);
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		/* Disconnect DSC instances dropped by the new state. */
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
						old_pipe->stream_res.dsc);
		}
	}
}

/* Program ODM combine (or bypass for a single slice) on the OTG,
 * enable the OPP clocks / left-edge extra pixel per slice, update DSC,
 * and reprogram the blank pattern when the OTG master has no DPP.
 */
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	int opp_inst[MAX_PIPES] = {0};
	int opp_head_count;
	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
	int i;

	opp_head_count = resource_get_opp_heads_for_otg_master(
			otg_master, &context->res_ctx, opp_heads);

	for (i = 0; i < opp_head_count; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
	if (opp_head_count > 1)
		otg_master->stream_res.tg->funcs->set_odm_combine(
				otg_master->stream_res.tg,
				opp_inst, opp_head_count,
				odm_slice_width, last_odm_slice_width);
	else
		otg_master->stream_res.tg->funcs->set_odm_bypass(
				otg_master->stream_res.tg,
				&otg_master->stream->timing);

	for (i = 0; i < opp_head_count; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				opp_heads[i]->stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	update_dsc_for_odm_change(dc, context, otg_master);

	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
		/*
		 * blank pattern is generated by OPP, reprogram blank pattern
		 * due to OPP count change
		 */
		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}

/* Unblank the stream on its encoder (HPO for 128b/132b links, DIO
 * otherwise) and turn the eDP backlight back on when applicable.
 */
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = {0};
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* calculate parameters for unblank */
	params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);

	params.timing = pipe_ctx->stream->timing;
	params.link_settings.link_rate = link_settings->link_rate;
	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;

	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.tg->inst);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
		hws->funcs.edp_backlight_control(link, true);
}

/* Hand hardware back (e.g. before reset/suspend): drop the FAMS2 DMUB
 * config and force P-State support on in DCN so clocks can be updated.
 */
void dcn401_hardware_release(struct dc *dc)
{
	dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);

	/* If pstate unsupported, or still supported
	 * by firmware, force it supported by dcn
	 */
	if (dc->current_state) {
		if ((!dc->clk_mgr->clks.p_state_change_support ||
				dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
				dc->res_pool->hubbub->funcs->force_pstate_change_control)
			dc->res_pool->hubbub->funcs->force_pstate_change_control(
					dc->res_pool->hubbub, true, true);

		dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
	}
}

/* Wait until HUBBUB reports the DET buffer update complete for every
 * HUBP under the given OTG master (all OPP heads, all DPP pipes).
 */
void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	struct pipe_ctx *dpp_pipes[MAX_PIPES];
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int dpp_count = 0;

	if (!otg_master->stream)
		return;

	int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
			&context->res_ctx, opp_heads);

	for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
		if (opp_heads[slice_idx]->plane_state) {
			/* Slice has planes: wait on each DPP pipe's HUBP. */
			dpp_count = resource_get_dpp_pipes_for_opp_head(
					opp_heads[slice_idx],
					&context->res_ctx,
					dpp_pipes);
			for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
				struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
				if (dpp_pipe && hubbub &&
						dpp_pipe->plane_res.hubp &&
						hubbub->funcs->wait_for_det_update)
					hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
			}
		} else {
			/* No planes: wait on the OPP head's own HUBP. */
			if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
				hubbub->funcs->wait_for_det_update(hubbub,
						opp_heads[slice_idx]->plane_res.hubp->inst);
		}
	}
}

/* Lock or unlock all enabled, non-phantom OTG master pipes. On unlock,
 * pipes flagged in dc->scratch.pipes_to_unlock_first are released first
 * and their DET buffer updates awaited (so freed DET can be re-used)
 * before the remaining pipes are unlocked.
 */
void dcn401_interdependent_update_lock(struct dc *dc,
		struct dc_state *context, bool lock)
{
	unsigned int i = 0;
	struct pipe_ctx *pipe = NULL;
	struct timing_generator *tg = NULL;

	if (lock) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
				continue;
			dc->hwss.pipe_control_lock(dc, pipe, true);
		}
	} else {
		/* Need to free DET being used first and have pipe update, then unlock the remaining pipes*/
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				continue;
			}

			if (dc->scratch.pipes_to_unlock_first[i]) {
				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
				dc->hwss.pipe_control_lock(dc, pipe, false);
				/* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe*/
				dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
			}
		}

		/* Unlocking the rest of the pipes */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (dc->scratch.pipes_to_unlock_first[i])
				continue;

			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				continue;
			}

			dc->hwss.pipe_control_lock(dc, pipe, false);
		}
	}
}

void
dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
{
	/* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
	 * HUBP will properly fetch 3DLUT contents after unlock.
	 *
	 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
	 * of whether OTG lock is currently being held or not.
	 */
	struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
	struct pipe_ctx *odm_pipe, *mpc_pipe;
	int i, wa_pipe_ct = 0;

	/* Collect every ODM/MPC pipe whose plane sources its 3DLUT from
	 * video memory with shaper + 3DLUT enabled.
	 */
	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
		for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
			if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
					== DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
					&& mpc_pipe->plane_state->mcm_shaper_3dlut_setting
					== DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
				wa_pipes[wa_pipe_ct++] = mpc_pipe;
			}
		}
	}

	if (wa_pipe_ct > 0) {
		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);

		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
		if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
			pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);

		/* NOTE(review): 3DLUT FL is asserted again (true both times)
		 * after the unlock lands - presumably to re-issue the enable
		 * VREADY may have cancelled; confirm against HW docs.
		 */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
	} else {
		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
	}
}

/* Flush outstanding compbuf sizing to HUBBUB after a state commit. */
void dcn401_program_outstanding_updates(struct dc *dc,
		struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* update compbuf if required */
	if (hubbub->funcs->program_compbuf_segments)
		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
}

/* Tear down the back end (OTG, audio, link output) for @pipe_ctx; the
 * caller iterates pipes so the parent pipe is reset last.
 */
void dcn401_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	/* free acquired resources */
	if (pipe_ctx->stream_res.audio) {
		/* disable az_endpoint */
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/* free audio */
		if (dc->caps.dynamic_audio == true) {
			/* we have to dynamic arbitrate the audio endpoints */
			/* we free the resource, need reset is_audio_acquired */
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		/* clear any pending DRR adjustment on this TG */
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);

		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
		 * the case where the same symclk is shared across multiple otg
		 * instances
		 */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			link->phy_state.symclk_ref_cnts.otg = 0;
		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
			link_hwss->disable_link_output(link,
					&pipe_ctx->link_res, pipe_ctx->stream->signal);
			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
		}

		/* reset DTBCLK_P */
		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
	}

	/*
	 * In case of a dangling plane, setting this to NULL unconditionally
	 * causes failures during reset hw ctx where, if stream is NULL,
	 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
	 */
	pipe_ctx->stream = NULL;
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->next_odm_pipe = NULL;
	pipe_ctx->prev_odm_pipe = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
			pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}

/*
 * dcn401_reset_hw_ctx_wrap - reset back ends whose stream was removed or changed
 * @dc: display core state
 * @context: incoming state being applied
 *
 * Walks pipes from highest index down so the parent (pipe 0) is reset last;
 * only non-child head pipes (no top_pipe / prev_odm_pipe) are considered.
 */
void dcn401_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End */
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			if (hws->funcs.reset_back_end_for_pipe)
				hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			/* NOTE(review): cs_power_down is called unconditionally when old_clk
			 * is non-NULL — presumably every clock source implements it; confirm. */
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}

/*
 * dcn401_calculate_vready_offset_for_group - largest VREADY offset in a pipe group
 * @pipe: any pipe in the MPC/ODM group
 *
 * Returns the maximum vready_offset_pixels across this pipe and every pipe
 * reachable via bottom/top (MPC) and next/prev (ODM) links, so all pipes
 * sharing an OTG use a common, safe offset.
 */
static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
	struct pipe_ctx *other_pipe;
	unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;

	/* Always use the largest vready_offset of all connected pipes */
	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
	}
	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
	}
	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
	}
	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
	}

	return vready_offset;
}

/*
 * dcn401_program_tg - program global sync / VTG parameters on a pipe's TG
 *
 * Waits for VACTIVE on non-phantom pipes before setting VTG params, then
 * hooks up the VUPDATE interrupt if the hwseq provides it.
 */
static void dcn401_program_tg(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context,
	struct dce_hwseq *hws)
{
	pipe_ctx->stream_res.tg->funcs->program_global_sync(
		pipe_ctx->stream_res.tg,
		dcn401_calculate_vready_offset_for_group(pipe_ctx),
		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);

	/* phantom (SubVP) OTGs are not expected to reach VACTIVE here */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);

	pipe_ctx->stream_res.tg->funcs->set_vtg_params(
		pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

	if (hws->funcs.setup_vupdate_interrupt)
		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}

/*
 * dcn401_program_pipe - program one pipe of the front end for @context
 * @dc: display core state
 * @pipe_ctx: pipe to program (may be an MPC slave; TG/OPP work is gated to heads)
 * @context: state being applied
 *
 * Order matters: blank/TG/ODM first, then plane enable, DET sizing,
 * HUBP/DPP, color management (HDR multiplier, MCM LUTs, input/output
 * transfer functions), FMT/dyn-expansion, ABM, and finally test pattern.
 */
void dcn401_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only need to unblank on top pipe */
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
		if (pipe_ctx->update_flags.bits.enable ||
				pipe_ctx->update_flags.bits.odm ||
				pipe_ctx->stream->update_flags.bits.abm_level)
			hws->funcs.blank_pixel_data(dc, pipe_ctx,
					!pipe_ctx->plane_state ||
					!pipe_ctx->plane_state->visible);
	}

	/* Only update TG on top pipe */
	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
			&& !pipe_ctx->prev_odm_pipe)
		dcn401_program_tg(dc, pipe_ctx, context, hws);

	if (pipe_ctx->update_flags.bits.odm)
		hws->funcs.update_odm(dc, context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable) {
		if (hws->funcs.enable_plane)
			hws->funcs.enable_plane(dc, pipe_ctx, context);
		else
			dc->hwss.enable_plane(dc, pipe_ctx, context);

		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
	}

	/* DET reallocation for this pipe (size in KB and/or segment count) */
	if (pipe_ctx->update_flags.bits.det_size) {
		if (dc->res_pool->hubbub->funcs->program_det_size)
			dc->res_pool->hubbub->funcs->program_det_size(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
		if (dc->res_pool->hubbub->funcs->program_det_segments)
			dc->res_pool->hubbub->funcs->program_det_segments(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
	}

	/* any pipe, plane or stream update flag triggers a HUBP/DPP reprogram */
	if (pipe_ctx->update_flags.raw ||
			(pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
			pipe_ctx->stream->update_flags.raw)
		dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);

	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->plane_state->update_flags.bits.hdr_mult))
		hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (hws->funcs.populate_mcm_luts) {
		if (pipe_ctx->plane_state) {
			hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
				pipe_ctx->plane_state->lut_bank_a);
			/* double-buffered LUT banks: flip for the next programming pass */
			pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
		}
	}

	if (pipe_ctx->plane_state &&
		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
			pipe_ctx->update_flags.bits.enable))
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->stream->update_flags.bits.out_tf ||
			(pipe_ctx->plane_state &&
				pipe_ctx->plane_state->update_flags.bits.output_tf_change))
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interaction between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
	    || pipe_ctx->update_flags.bits.opp_changed) {

		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
			pipe_ctx->stream_res.opp,
			COLOR_SPACE_YCBCR601,
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->signal);

		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&pipe_ctx->stream->bit_depth_params,
			&pipe_ctx->stream->clamping);
	}

	/* Set ABM pipe after other pipe configurations done */
	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
		if (pipe_ctx->stream_res.abm) {
			dc->hwss.set_pipe(pipe_ctx);
			pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
				pipe_ctx->stream->abm_level);
		}
	}

	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
		struct bit_depth_reduction_params params;

		/* zeroed params = bit depth reduction disabled while pattern is active */
		memset(&params, 0, sizeof(params));
		odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
		dc->hwss.set_disp_pattern_generator(dc,
			pipe_ctx,
			pipe_ctx->stream_res.test_pattern_params.test_pattern,
			pipe_ctx->stream_res.test_pattern_params.color_space,
			pipe_ctx->stream_res.test_pattern_params.color_depth,
			NULL,
			pipe_ctx->stream_res.test_pattern_params.width,
			pipe_ctx->stream_res.test_pattern_params.height,
			pipe_ctx->stream_res.test_pattern_params.offset);
	}
}

/*
 * dcn401_program_front_end_for_ctx - program the whole front end for @context
 * @dc: display core state
 * @context: new state to apply
 *
 * Sequence: disable triple buffering for full updates, detect per-pipe
 * changes, enable phantom OTGs before their disable, OTG blank, disconnect
 * MPCCs (freeing DET for disabled/phantom pipes), update ODM on blanked
 * masters, then program all updated pipes top-down plus writeback trees.
 */
void dcn401_program_front_end_for_ctx(
	struct dc *dc,
	struct dc_state *context)
{
	int i;
	unsigned int prev_hubp_count = 0;
	unsigned int hubp_count = 0;
	struct dce_hwseq *hws = dc->hwseq;
	struct pipe_ctx *pipe = NULL;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (resource_is_pipe_topology_changed(dc->current_state, context))
		resource_log_pipe_topology_update(dc, context);

	if
	    (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];

			if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
				/* full update should never arrive with flips still triple buffered */
				if (pipe->plane_state->triplebuffer_flips)
					BREAK_TO_DEBUGGER();

				/* turn off triple buffer for full update */
				dc->hwss.program_triplebuffer(
					dc, pipe, pipe->plane_state->triplebuffer_flips);
			}
		}
	}

	/* count planes in the outgoing vs incoming state */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			prev_hubp_count++;
		if (context->res_ctx.pipe_ctx[i].plane_state)
			hubp_count++;
	}

	/* first plane coming up: briefly force p-state change control */
	if (prev_hubp_count == 0 && hubp_count > 0) {
		if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
			dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, true, false);
		udelay(500);
	}

	/* Set pipe update flags and lock pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
			&context->res_ctx.pipe_ctx[i]);

	/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
	 * buffer updates properly)
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;

		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
			dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;

			if (tg->funcs->enable_crtc) {
				if (dc->hwseq->funcs.blank_pixel_data)
					dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);

				tg->funcs->enable_crtc(tg);
			}
		}
	}
	/* OTG blank before disabling all front ends */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
				&& !context->res_ctx.pipe_ctx[i].top_pipe
				&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
				&& context->res_ctx.pipe_ctx[i].stream)
			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);


	/* Disconnect mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
				|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
			 * then we want to do the programming here (effectively it's being disabled). If we do
			 * the programming later the DET won't be updated until the OTG for the phantom pipe is
			 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
			 * DET allocation.
			 */
			if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
				(context->res_ctx.pipe_ctx[i].plane_state &&
				dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
				SUBVP_PHANTOM))) {
				if (hubbub->funcs->program_det_size)
					hubbub->funcs->program_det_size(hubbub,
						dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
				if (dc->res_pool->hubbub->funcs->program_det_segments)
					dc->res_pool->hubbub->funcs->program_det_segments(
						hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
			}
			hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
				&dc->current_state->res_ctx.pipe_ctx[i]);
			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
		}

	/* update ODM for blanked OTG master pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				!resource_is_pipe_type(pipe, DPP_PIPE) &&
				pipe->update_flags.bits.odm &&
				hws->funcs.update_odm)
			hws->funcs.update_odm(dc, context, pipe);
	}

	/*
	 * Program all updated pipes, order matters for mpcc setup. Start with
	 * top pipe and program all pipes that follow in order
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			/* walk down the MPC chain from the head pipe */
			while (pipe) {
				if (hws->funcs.program_pipe)
					hws->funcs.program_pipe(dc, pipe, context);
				else {
					/* Don't program phantom pipes in the regular front end programming sequence.
					 * There is an MPO transition case where a pipe being used by a video plane is
					 * transitioned directly to be a phantom pipe when closing the MPO video.
					 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
					 * right away) but the MPO still exists until the double buffered update of the
					 * main pipe so we will get a frame of underflow if the phantom pipe is
					 * programmed here.
					 */
					if (pipe->stream &&
						dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
						dcn401_program_pipe(dc, pipe, context);
				}

				pipe = pipe->bottom_pipe;
			}
		}

		/* Program secondary blending tree and writeback pipes */
		pipe = &context->res_ctx.pipe_ctx[i];
		if (!pipe->top_pipe && !pipe->prev_odm_pipe
				&& pipe->stream && pipe->stream->num_wb_info > 0
				&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
					|| pipe->stream->update_flags.raw)
				&& hws->funcs.program_all_writeback_pipes_in_tree)
			hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);

		/* Avoid underflow by check of pipe line read when adding 2nd plane.
		 */
		if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
				!pipe->top_pipe &&
				pipe->stream &&
				pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
				dc->current_state->stream_status[0].plane_count == 1 &&
				context->stream_status[0].plane_count > 1) {
			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
		}
	}
}

/*
 * dcn401_post_unlock_program_front_end - front end work that must run after
 * pipe locks are released
 * @dc: display core state
 * @context: state just applied
 *
 * Resets released OPP heads, disables planes flagged for disable, waits for
 * flip-pending and for ODM double-buffer updates (bounded polling), releases
 * the forced p-state control, programs phantom pipes, and applies the
 * force-pstate / MALL / watermark / self-refresh workarounds.
 */
void dcn401_post_unlock_program_front_end(
	struct dc *dc,
	struct dc_state *context)
{
	// Timeout for pipe enable
	unsigned int timeout_us = 100000;
	unsigned int polling_interval_us = 1;
	struct dce_hwseq *hwseq = dc->hwseq;
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	/* reset OPPs that were heads in the old state but no longer are */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
				!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
			dc->hwss.post_unlock_reset_opp(dc,
				&dc->current_state->res_ctx.pipe_ctx[i]);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);

	/*
	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
	 * part of the enable operation otherwise, DM may request an immediate flip which
	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
	 * is unsupported on DCN.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		// Don't check flip pending on phantom pipes
		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
				dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
			struct hubp *hubp = pipe->plane_res.hubp;
			int j = 0;

			/* bounded poll: ~100ms worst case at 1us interval */
			for (j = 0; j < timeout_us / polling_interval_us
				&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
				udelay(polling_interval_us);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* When going from a smaller ODM slice count to larger, we must ensure double
		 * buffer update completes before we return to ensure we don't reduce DISPCLK
		 * before we've transitioned to 2:1 or 4:1
		 */
		if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
				resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
				dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
			int j = 0;
			struct timing_generator *tg = pipe->stream_res.tg;

			if (tg->funcs->get_optc_double_buffer_pending) {
				for (j = 0; j < timeout_us / polling_interval_us
					&& tg->funcs->get_optc_double_buffer_pending(tg); j++)
					udelay(polling_interval_us);
			}
		}
	}

	/* release the force applied when the first plane came up */
	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
			dc->res_pool->hubbub, false, false);


	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
			 * case (if a pipe being used for a video plane transitions to a phantom pipe, it
			 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
			 * programming sequence).
			 */
			while (pipe) {
				if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
					/* When turning on the phantom pipe we want to run through the
					 * entire enable sequence, so apply all the "enable" flags.
					 */
					if (dc->hwss.apply_update_flags_for_phantom)
						dc->hwss.apply_update_flags_for_phantom(pipe);
					if (dc->hwss.update_phantom_vp_position)
						dc->hwss.update_phantom_vp_position(dc, context, pipe);
					dcn401_program_pipe(dc, pipe, context);
				}
				pipe = pipe->bottom_pipe;
			}
		}
	}

	if (!hwseq)
		return;

	/* P-State support transitions:
	 * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
	 * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
	 * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
	 * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
	 * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
	 */
	if (hwseq->funcs.update_force_pstate)
		dc->hwseq->funcs.update_force_pstate(dc, context);

	/* Only program the MALL registers after all the main and phantom pipes
	 * are done programming.
	 */
	if (hwseq->funcs.program_mall_pipe_config)
		hwseq->funcs.program_mall_pipe_config(dc, context);

	/* WA to apply WM setting */
	if (hwseq->wa.DEGVIDCN21)
		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);


	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {

		if (dc->current_state->stream_status[0].plane_count == 1 &&
				context->stream_status[0].plane_count > 1) {

			struct timing_generator *tg = dc->res_pool->timing_generators[0];

			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);

			/* remember the frame so the WA can be lifted later */
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
				tg->funcs->get_frame_count(tg);
		}
	}
}

/*
 * dcn401_update_bandwidth - revalidate bandwidth and reprogram pipes for @context
 * @dc: display core state
 * @context: state whose bandwidth parameters are being applied
 *
 * Returns false if DML bandwidth validation fails; otherwise prepares
 * bandwidth and reprograms global sync / VTG / blanking / HUBP for every
 * pipe that has a plane. Returns true on success.
 */
bool dcn401_update_bandwidth(
	struct dc *dc,
	struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* recalculate DML parameters */
	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
		return false;

	/* apply updated bandwidth parameters */
	dc->hwss.prepare_bandwidth(dc, context);

	/* update hubp configs for all pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->plane_state == NULL)
			continue;

		if (pipe_ctx->top_pipe == NULL) {
			bool blank = !is_pipe_tree_visible(pipe_ctx);

			pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				dcn401_calculate_vready_offset_for_group(pipe_ctx),
				(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
				(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);

			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);

			if (pipe_ctx->prev_odm_pipe == NULL)
				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);

			if (hws->funcs.setup_vupdate_interrupt)
				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
		}

		if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
			pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
				pipe_ctx->plane_res.hubp,
				&pipe_ctx->hubp_regs,
				&pipe_ctx->global_sync,
				&pipe_ctx->stream->timing);
	}

	return true;
}

/*
 * dcn401_detect_pipe_changes - compute new_pipe->update_flags from old vs new
 * @old_state: currently-applied state
 * @new_state: incoming state
 * @old_pipe: pipe context in @old_state (same index as @new_pipe)
 * @new_pipe: pipe context in @new_state; its update_flags are written here
 *
 * Clears update_flags then sets disable/enable/odm/global_sync/det_size/
 * opp/tg/mpcc/dppclk/scaler/viewport/dlg-ttu-rq/unbounded_req/test-pattern
 * bits by field-wise comparison. Phantom (SubVP) transitions are handled
 * specially: main->phantom forces a disable, phantom->main forces an enable.
 */
void dcn401_detect_pipe_changes(struct dc_state *old_state,
	struct dc_state *new_state,
	struct pipe_ctx *old_pipe,
	struct pipe_ctx *new_pipe)
{
	bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
	bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;

	unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
	unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;

	new_pipe->update_flags.raw = 0;

	/* If non-phantom pipe is being transitioned to a phantom pipe,
	 * set disable and return immediately. This is because the pipe
	 * that was previously in use must be fully disabled before we
	 * can "enable" it as a phantom pipe (since the OTG will certainly
	 * be different). The post_unlock sequence will set the correct
	 * update flags to enable the phantom pipe.
	 */
	if (old_pipe->plane_state && !old_is_phantom &&
			new_pipe->plane_state && new_is_phantom) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
			resource_is_odm_topology_changed(new_pipe, old_pipe))
		/* Detect odm changes */
		new_pipe->update_flags.bits.odm = 1;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		/* fresh enable: everything must be programmed */
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.unbounded_req = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		new_pipe->update_flags.bits.det_size = 1;
		if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
				new_pipe->stream_res.test_pattern_params.width != 0 &&
				new_pipe->stream_res.test_pattern_params.height != 0)
			new_pipe->update_flags.bits.test_pattern_changed = 1;
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}

	/* For SubVP we need to unconditionally enable because any phantom pipes are
	 * always removed then newly added for every full updates whenever SubVP is in use.
	 * The remove-add sequence of the phantom pipe always results in the pipe
	 * being blanked in enable_stream_timing (DPG).
	 */
	if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
		new_pipe->update_flags.bits.enable = 1;

	/* Phantom pipes are effectively disabled, if the pipe was previously phantom
	 * we have to enable
	 */
	if (old_pipe->plane_state && old_is_phantom &&
			new_pipe->plane_state && !new_is_phantom)
		new_pipe->update_flags.bits.enable = 1;

	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect plane change */
	if (old_pipe->plane_state != new_pipe->plane_state)
		new_pipe->update_flags.bits.plane_changed = true;

	/* Detect top pipe only changes */
	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
		/* Detect global sync changes
		 * NOTE(review): pstate_keepout_start_lines is not compared here —
		 * presumably intentional; confirm it cannot change independently. */
		if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
				|| (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
				|| (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
				|| (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
			new_pipe->update_flags.bits.global_sync = 1;
	}

	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
		new_pipe->update_flags.bits.det_size = 1;

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/*
	 * Detect mpcc blending changes, only dpp inst and opp matter here,
	 * mpccs getting removed/inserted update connected ones during their own
	 * programming
	 */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
			|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
			|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
				&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		/* local copies so the interdependent fields can be neutralized
		 * before the catch-all memcmp below */
		struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs old_rq_regs = old_pipe->hubp_regs.rq_regs;
		struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs *new_rq_regs = &new_pipe->hubp_regs.rq_regs;

		/* Detect pipe interdependent updates */
		if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
				|| (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
				|| (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
				|| (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
				|| (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
				|| (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
				|| (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
				|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
				|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
				|| (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
				|| (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
				|| (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
				|| (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
				|| (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
				|| (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
					new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
				|| (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
				|| (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
			/* copy new values into the local old copies so the catch-all
			 * memcmp only flags fields NOT covered by hubp_interdependent */
			old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
			old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
			old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
			old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
			old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
			old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
			old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_c =
new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
			old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
			old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
			old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
			old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
				memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
				memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}

	if (old_pipe->unbounded_req != new_pipe->unbounded_req)
		new_pipe->update_flags.bits.unbounded_req = 1;

	if (memcmp(&old_pipe->stream_res.test_pattern_params,
			&new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
		new_pipe->update_flags.bits.test_pattern_changed = 1;
	}
}

/*
 * dcn401_plane_atomic_power_down - power-gate and reset one plane's front end
 * (its DPP and HUBP pair).
 *
 * @dc:   dc context; provides the hwseq hooks and register access used below
 * @dpp:  DPP (display pixel processing) block to power down
 * @hubp: HUBP (display hub) block to power down
 *
 * IP_REQUEST_EN is raised temporarily (only if it was not already set) so the
 * power-gating registers can be programmed, and restored before returning.
 * NOTE(review): the pg_control/root_clock_control hooks are NULL-checked, so
 * they are presumably optional on some configurations; hubp_reset/dpp_reset
 * are called unconditionally — confirm those funcs pointers are always wired.
 */
void dcn401_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	uint32_t org_ip_request_cntl = 0;

	DC_LOGGER_INIT(dc->ctx->logger);

	/* Snapshot IP_REQUEST_EN; enable it only if currently clear, so we know
	 * whether to restore it at the end.
	 */
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

	/* Power-gate the DPP, then the HUBP (false = remove power) */
	if (hws->funcs.dpp_pg_control)
		hws->funcs.dpp_pg_control(hws, dpp->inst, false);

	if (hws->funcs.hubp_pg_control)
		hws->funcs.hubp_pg_control(hws, hubp->inst, false);

	/* Reset both blocks so they come back in a known state on re-enable */
	hubp->funcs->hubp_reset(hubp);
	dpp->funcs->dpp_reset(dpp);

	/* Drop IP_REQUEST_EN again only if this function was the one to set it */
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);

	DC_LOG_DEBUG(
			"Power gated front end %d\n", hubp->inst);

	/* Finally gate this instance's DPP root clock */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}