1 // SPDX-License-Identifier: MIT 2 // 3 // Copyright 2024 Advanced Micro Devices, Inc. 4 5 #include "dm_services.h" 6 #include "basics/dc_common.h" 7 #include "dm_helpers.h" 8 #include "core_types.h" 9 #include "resource.h" 10 #include "dccg.h" 11 #include "dce/dce_hwseq.h" 12 #include "reg_helper.h" 13 #include "abm.h" 14 #include "hubp.h" 15 #include "dchubbub.h" 16 #include "timing_generator.h" 17 #include "opp.h" 18 #include "ipp.h" 19 #include "mpc.h" 20 #include "mcif_wb.h" 21 #include "dc_dmub_srv.h" 22 #include "link_hwss.h" 23 #include "dpcd_defs.h" 24 #include "clk_mgr.h" 25 #include "dsc.h" 26 #include "link.h" 27 28 #include "dce/dmub_hw_lock_mgr.h" 29 #include "dcn10/dcn10_cm_common.h" 30 #include "dcn20/dcn20_optc.h" 31 #include "dcn30/dcn30_cm_common.h" 32 #include "dcn32/dcn32_hwseq.h" 33 #include "dcn401_hwseq.h" 34 #include "dcn401/dcn401_resource.h" 35 #include "dc_state_priv.h" 36 #include "link_enc_cfg.h" 37 38 #define DC_LOGGER_INIT(logger) 39 40 #define CTX \ 41 hws->ctx 42 #define REG(reg)\ 43 hws->regs->reg 44 #define DC_LOGGER \ 45 dc->ctx->logger 46 47 48 #undef FN 49 #define FN(reg_name, field_name) \ 50 hws->shifts->field_name, hws->masks->field_name 51 52 static void dcn401_initialize_min_clocks(struct dc *dc) 53 { 54 struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; 55 56 clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ; 57 clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000; 58 clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000; 59 clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000; 60 clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000; 61 if (dc->debug.disable_boot_optimizations) { 62 clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000; 63 } else { 64 /* Even though DPG_EN = 1 for the connected display, it still requires the 65 * correct timing so we cannot set DISPCLK to min freq or it could cause 66 * audio corruption. Read current DISPCLK from DENTIST and request the same 67 * freq to ensure that the timing is valid and unchanged. 68 */ 69 clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr); 70 } 71 clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; 72 clocks->fclk_p_state_change_support = true; 73 clocks->p_state_change_support = true; 74 75 dc->clk_mgr->funcs->update_clocks( 76 dc->clk_mgr, 77 dc->current_state, 78 true); 79 } 80 81 void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx) 82 { 83 unsigned int i = 0; 84 struct mpc_grph_gamut_adjustment mpc_adjust; 85 unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst; 86 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 87 88 //For now assert if location is not pre-blend 89 if (pipe_ctx->plane_state) 90 ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE); 91 92 // program MPCC_MCM_FIRST_GAMUT_REMAP 93 memset(&mpc_adjust, 0, sizeof(mpc_adjust)); 94 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 95 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP; 96 97 if (pipe_ctx->plane_state && 98 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) { 99 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 100 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) 101 mpc_adjust.temperature_matrix[i] = 102 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i]; 103 } 104 105 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust); 106 107 // program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now 108 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 109 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP; 110 111 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust); 112 113 // program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x 114 memset(&mpc_adjust, 0, sizeof(mpc_adjust)); 115 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; 116 mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP; 117 118 if (pipe_ctx->top_pipe == NULL) { 119 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { 120 mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; 121 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) 122 mpc_adjust.temperature_matrix[i] = 123 pipe_ctx->stream->gamut_remap_matrix.matrix[i]; 124 } 125 } 126 127 mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust); 128 } 129 130 void dcn401_init_hw(struct dc *dc) 131 { 132 struct abm **abms = dc->res_pool->multiple_abms; 133 struct dce_hwseq *hws = dc->hwseq; 134 struct dc_bios *dcb = dc->ctx->dc_bios; 135 struct resource_pool *res_pool = dc->res_pool; 136 int i; 137 int edp_num; 138 uint32_t backlight = MAX_BACKLIGHT_LEVEL; 139 uint32_t user_level = MAX_BACKLIGHT_LEVEL; 140 int current_dchub_ref_freq = 0; 141 142 if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) { 143 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); 144 145 // mark dcmode limits present if any clock has distinct AC and DC values from SMU 146 dc->caps.dcmode_power_limits_present = 147 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) || 148 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) || 149 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) || 150 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) || 151 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) || 152 (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz); 153 } 154 155 // Initialize the dccg 156 if (res_pool->dccg->funcs->dccg_init) 157 res_pool->dccg->funcs->dccg_init(res_pool->dccg); 158 159 // Disable DMUB Initialization until IPS state programming is finalized 160 //if (!dcb->funcs->is_accelerated_mode(dcb)) { 161 // hws->funcs.bios_golden_init(dc); 162 //} 163 164 // Set default OPTC memory power states 165 if (dc->debug.enable_mem_low_power.bits.optc) { 166 // Shutdown when unassigned and light sleep in VBLANK 167 REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); 168 } 169 170 if (dc->debug.enable_mem_low_power.bits.vga) { 171 // Power down VGA memory 172 REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); 173 } 174 175 if (dc->ctx->dc_bios->fw_info_valid) { 176 res_pool->ref_clocks.xtalin_clock_inKhz = 177 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; 178 179 if (res_pool->hubbub) { 180 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, 181 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, 182 &res_pool->ref_clocks.dccg_ref_clock_inKhz); 183 184 current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; 185 186 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, 187 res_pool->ref_clocks.dccg_ref_clock_inKhz, 188 &res_pool->ref_clocks.dchub_ref_clock_inKhz); 189 } else { 190 // Not all ASICs have DCCG sw component 191 res_pool->ref_clocks.dccg_ref_clock_inKhz = 192 res_pool->ref_clocks.xtalin_clock_inKhz; 193 res_pool->ref_clocks.dchub_ref_clock_inKhz = 194 res_pool->ref_clocks.xtalin_clock_inKhz; 195 } 196 } else 197 ASSERT_CRITICAL(false); 198 199 for (i = 0; i < dc->link_count; i++) { 200 /* Power up AND update implementation according to the 201 * required signal (which may be different from the 202 * default signal on connector). 203 */ 204 struct dc_link *link = dc->links[i]; 205 206 link->link_enc->funcs->hw_init(link->link_enc); 207 208 /* Check for enabled DIG to identify enabled display */ 209 if (link->link_enc->funcs->is_dig_enabled && 210 link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 211 link->link_status.link_active = true; 212 link->phy_state.symclk_state = SYMCLK_ON_TX_ON; 213 if (link->link_enc->funcs->fec_is_active && 214 link->link_enc->funcs->fec_is_active(link->link_enc)) 215 link->fec_state = dc_link_fec_enabled; 216 } 217 } 218 219 /* enable_power_gating_plane before dsc_pg_control because 220 * FORCEON = 1 with hw default value on bootup, resume from s3 221 */ 222 if (hws->funcs.enable_power_gating_plane) 223 hws->funcs.enable_power_gating_plane(dc->hwseq, true); 224 225 /* we want to turn off all dp displays before doing detection */ 226 dc->link_srv->blank_all_dp_displays(dc); 227 228 /* If taking control over from VBIOS, we may want to optimize our first 229 * mode set, so we need to skip powering down pipes until we know which 230 * pipes we want to use. 231 * Otherwise, if taking control is not possible, we need to power 232 * everything down. 233 */ 234 if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { 235 /* Disable boot optimizations means power down everything including PHY, DIG, 236 * and OTG (i.e. the boot is not optimized because we do a full power down). 237 */ 238 if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations) 239 dc->hwss.enable_accelerated_mode(dc, dc->current_state); 240 else 241 hws->funcs.init_pipes(dc, dc->current_state); 242 243 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) 244 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, 245 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); 246 247 dcn401_initialize_min_clocks(dc); 248 249 /* On HW init, allow idle optimizations after pipes have been turned off. 250 * 251 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state 252 * is reset (i.e. not in idle at the time hw init is called), but software state 253 * still has idle_optimizations = true, so we must disable idle optimizations first 254 * (i.e. set false), then re-enable (set true). 255 */ 256 dc_allow_idle_optimizations(dc, false); 257 dc_allow_idle_optimizations(dc, true); 258 } 259 260 /* In headless boot cases, DIG may be turned 261 * on which causes HW/SW discrepancies. 262 * To avoid this, power down hardware on boot 263 * if DIG is turned on and seamless boot not enabled 264 */ 265 if (!dc->config.seamless_boot_edp_requested) { 266 struct dc_link *edp_links[MAX_NUM_EDP]; 267 struct dc_link *edp_link; 268 269 dc_get_edp_links(dc, edp_links, &edp_num); 270 if (edp_num) { 271 for (i = 0; i < edp_num; i++) { 272 edp_link = edp_links[i]; 273 if (edp_link->link_enc->funcs->is_dig_enabled && 274 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && 275 dc->hwss.edp_backlight_control && 276 hws->funcs.power_down && 277 dc->hwss.edp_power_control) { 278 dc->hwss.edp_backlight_control(edp_link, false); 279 hws->funcs.power_down(dc); 280 dc->hwss.edp_power_control(edp_link, false); 281 } 282 } 283 } else { 284 for (i = 0; i < dc->link_count; i++) { 285 struct dc_link *link = dc->links[i]; 286 287 if (link->link_enc->funcs->is_dig_enabled && 288 link->link_enc->funcs->is_dig_enabled(link->link_enc) && 289 hws->funcs.power_down) { 290 hws->funcs.power_down(dc); 291 break; 292 } 293 294 } 295 } 296 } 297 298 for (i = 0; i < res_pool->audio_count; i++) { 299 struct audio *audio = res_pool->audios[i]; 300 301 audio->funcs->hw_init(audio); 302 } 303 304 for (i = 0; i < dc->link_count; i++) { 305 struct dc_link *link = dc->links[i]; 306 307 if (link->panel_cntl) { 308 backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); 309 user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; 310 } 311 } 312 313 for (i = 0; i < dc->res_pool->pipe_count; i++) { 314 if (abms[i] != NULL && abms[i]->funcs != NULL) 315 abms[i]->funcs->abm_init(abms[i], backlight, user_level); 316 } 317 318 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 319 REG_WRITE(DIO_MEM_PWR_CTRL, 0); 320 321 if (!dc->debug.disable_clock_gate) { 322 /* enable all DCN clock gating */ 323 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 324 325 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 326 327 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 328 } 329 330 dcn401_setup_hpo_hw_control(hws, true); 331 332 if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) 333 dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); 334 335 if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges) 336 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); 337 338 if (dc->res_pool->hubbub->funcs->force_pstate_change_control) 339 dc->res_pool->hubbub->funcs->force_pstate_change_control( 340 dc->res_pool->hubbub, false, false); 341 342 if (dc->res_pool->hubbub->funcs->init_crb) 343 dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); 344 345 if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0) 346 dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc); 347 348 // Get DMCUB capabilities 349 if (dc->ctx->dmub_srv) { 350 dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); 351 dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; 352 dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0; 353 dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; 354 dc->debug.fams2_config.bits.enable &= 355 dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support 356 if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box) 357 || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) { 358 /* update bounding box if FAMS2 disabled, or if dchub clk has changed */ 359 if (dc->clk_mgr) 360 dc->res_pool->funcs->update_bw_bounding_box(dc, 361 dc->clk_mgr->bw_params); 362 } 363 } 364 } 365 366 static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx, 367 enum MCM_LUT_XABLE *shaper_xable, 368 enum MCM_LUT_XABLE *lut3d_xable, 369 enum MCM_LUT_XABLE *lut1d_xable) 370 { 371 enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL; 372 bool lut1d_enable = false; 373 struct mpc *mpc = dc->res_pool->mpc; 374 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 375 376 if (!pipe_ctx->plane_state) 377 return; 378 shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting; 379 lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable; 380 mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); 381 pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE; 382 383 *lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE; 384 385 switch (shaper_3dlut_setting) { 386 case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL: 387 *lut3d_xable = *shaper_xable = MCM_LUT_DISABLE; 388 break; 389 case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER: 390 *lut3d_xable = MCM_LUT_DISABLE; 391 *shaper_xable = MCM_LUT_ENABLE; 392 break; 393 case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT: 394 *lut3d_xable = *shaper_xable = MCM_LUT_ENABLE; 395 break; 396 } 397 } 398 399 static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend) 400 { 401 struct mpc *mpc = dc->res_pool->mpc; 402 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 403 404 if (!pipe_ctx->plane_state) 405 return; 406 407 mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); 408 pipe_ctx->plane_state->mcm_location = (bPostBlend) ? 409 MPCC_MOVABLE_CM_LOCATION_AFTER : 410 MPCC_MOVABLE_CM_LOCATION_BEFORE; 411 } 412 413 static void dc_get_lut_mode( 414 enum dc_cm2_gpu_mem_layout layout, 415 enum hubp_3dlut_fl_mode *mode, 416 enum hubp_3dlut_fl_addressing_mode *addr_mode) 417 { 418 switch (layout) { 419 case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: 420 *mode = hubp_3dlut_fl_mode_native_1; 421 *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 422 break; 423 case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR: 424 *mode = hubp_3dlut_fl_mode_native_2; 425 *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 426 break; 427 case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR: 428 *mode = hubp_3dlut_fl_mode_transform; 429 *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear; 430 break; 431 default: 432 *mode = hubp_3dlut_fl_mode_disable; 433 *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 434 break; 435 } 436 } 437 438 static void dc_get_lut_format( 439 enum dc_cm2_gpu_mem_format dc_format, 440 enum hubp_3dlut_fl_format *format) 441 { 442 switch (dc_format) { 443 case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB: 444 *format = hubp_3dlut_fl_format_unorm_12msb_bitslice; 445 break; 446 case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB: 447 *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice; 448 break; 449 case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10: 450 *format = hubp_3dlut_fl_format_float_fp1_5_10; 451 break; 452 } 453 } 454 455 static void dc_get_lut_xbar( 456 enum dc_cm2_gpu_mem_pixel_component_order order, 457 enum hubp_3dlut_fl_crossbar_bit_slice *cr_r, 458 enum hubp_3dlut_fl_crossbar_bit_slice *y_g, 459 enum hubp_3dlut_fl_crossbar_bit_slice *cb_b) 460 { 461 switch (order) { 462 case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: 463 *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47; 464 *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; 465 *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15; 466 break; 467 case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA: 468 *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15; 469 *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; 470 *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; 471 break; 472 } 473 } 474 475 static void dc_get_lut_width( 476 enum dc_cm2_gpu_mem_size size, 477 enum hubp_3dlut_fl_width *width) 478 { 479 switch (size) { 480 case DC_CM2_GPU_MEM_SIZE_333333: 481 *width = hubp_3dlut_fl_width_33; 482 break; 483 case DC_CM2_GPU_MEM_SIZE_171717: 484 *width = hubp_3dlut_fl_width_17; 485 break; 486 case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: 487 *width = hubp_3dlut_fl_width_transformed; 488 break; 489 } 490 } 491 static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc) 492 { 493 if (mpc->funcs->rmcm.update_3dlut_fast_load_select && 494 mpc->funcs->rmcm.program_lut_read_write_control && 495 hubp->funcs->hubp_program_3dlut_fl_addr && 496 mpc->funcs->rmcm.program_bit_depth && 497 hubp->funcs->hubp_program_3dlut_fl_mode && 498 hubp->funcs->hubp_program_3dlut_fl_addressing_mode && 499 hubp->funcs->hubp_program_3dlut_fl_format && 500 hubp->funcs->hubp_update_3dlut_fl_bias_scale && 501 mpc->funcs->rmcm.program_bias_scale && 502 hubp->funcs->hubp_program_3dlut_fl_crossbar && 503 hubp->funcs->hubp_program_3dlut_fl_width && 504 mpc->funcs->rmcm.update_3dlut_fast_load_select && 505 mpc->funcs->rmcm.populate_lut && 506 mpc->funcs->rmcm.program_lut_mode && 507 hubp->funcs->hubp_enable_3dlut_fl && 508 mpc->funcs->rmcm.enable_3dlut_fl) 509 return true; 510 511 return false; 512 } 513 514 bool dcn401_program_rmcm_luts( 515 struct hubp *hubp, 516 struct pipe_ctx *pipe_ctx, 517 enum dc_cm2_transfer_func_source lut3d_src, 518 struct dc_cm2_func_luts *mcm_luts, 519 struct mpc *mpc, 520 bool lut_bank_a, 521 int mpcc_id) 522 { 523 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 524 union mcm_lut_params m_lut_params; 525 enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable; 526 enum hubp_3dlut_fl_mode mode; 527 enum hubp_3dlut_fl_addressing_mode addr_mode; 528 enum hubp_3dlut_fl_format format = 0; 529 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0; 530 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0; 531 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0; 532 enum hubp_3dlut_fl_width width = 0; 533 struct dc *dc = hubp->ctx->dc; 534 535 bool bypass_rmcm_3dlut = false; 536 bool bypass_rmcm_shaper = false; 537 538 dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); 539 540 /* 3DLUT */ 541 switch (lut3d_src) { 542 case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: 543 memset(&m_lut_params, 0, sizeof(m_lut_params)); 544 // Don't know what to do in this case. 545 //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: 546 break; 547 case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: 548 dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width); 549 if (!dc_is_rmcm_3dlut_supported(hubp, mpc) || 550 !mpc->funcs->rmcm.is_config_supported(width)) 551 return false; 552 553 //0. disable fl on mpc 554 mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF); 555 556 //1. power down the block 557 mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false); 558 559 //2. program RMCM 560 //2a. 3dlut reg programming 561 mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, 562 (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id); 563 564 hubp->funcs->hubp_program_3dlut_fl_addr(hubp, 565 mcm_luts->lut3d_data.gpu_mem_params.addr); 566 567 mpc->funcs->rmcm.program_bit_depth(mpc, 568 mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id); 569 570 // setting native or transformed mode, 571 dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode); 572 573 //these program the mcm 3dlut 574 hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode); 575 576 hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode); 577 578 //seems to be only for the MCM 579 dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format); 580 hubp->funcs->hubp_program_3dlut_fl_format(hubp, format); 581 582 mpc->funcs->rmcm.program_bias_scale(mpc, 583 mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias, 584 mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale, 585 mpcc_id); 586 hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp, 587 mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias, 588 mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale); 589 590 dc_get_lut_xbar( 591 mcm_luts->lut3d_data.gpu_mem_params.component_order, 592 &crossbar_bit_slice_cr_r, 593 &crossbar_bit_slice_y_g, 594 &crossbar_bit_slice_cb_b); 595 596 hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, 597 crossbar_bit_slice_cr_r, 598 crossbar_bit_slice_y_g, 599 crossbar_bit_slice_cb_b); 600 601 mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id); 602 603 mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst); 604 605 //2b. shaper reg programming 606 memset(&m_lut_params, 0, sizeof(m_lut_params)); 607 608 if (mcm_luts->shaper->type == TF_TYPE_HWPWL) { 609 m_lut_params.pwl = &mcm_luts->shaper->pwl; 610 } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { 611 ASSERT(false); 612 cm_helper_translate_curve_to_hw_format( 613 dc->ctx, 614 mcm_luts->shaper, 615 &dpp_base->regamma_params, true); 616 m_lut_params.pwl = &dpp_base->regamma_params; 617 } 618 if (m_lut_params.pwl) { 619 mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id); 620 mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id); 621 } else { 622 //RMCM 3dlut won't work without its shaper 623 return false; 624 } 625 626 //3. Select the hubp connected to this RMCM 627 hubp->funcs->hubp_enable_3dlut_fl(hubp, true); 628 mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id); 629 630 //4. power on the block 631 if (m_lut_params.pwl) 632 mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true); 633 634 break; 635 default: 636 return false; 637 } 638 639 return true; 640 } 641 642 void dcn401_populate_mcm_luts(struct dc *dc, 643 struct pipe_ctx *pipe_ctx, 644 struct dc_cm2_func_luts mcm_luts, 645 bool lut_bank_a) 646 { 647 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 648 struct hubp *hubp = pipe_ctx->plane_res.hubp; 649 int mpcc_id = hubp->inst; 650 struct mpc *mpc = dc->res_pool->mpc; 651 union mcm_lut_params m_lut_params; 652 enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src; 653 enum hubp_3dlut_fl_format format = 0; 654 enum hubp_3dlut_fl_mode mode; 655 enum hubp_3dlut_fl_width width = 0; 656 enum hubp_3dlut_fl_addressing_mode addr_mode; 657 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0; 658 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0; 659 enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0; 660 enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE; 661 enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE; 662 enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE; 663 bool rval; 664 665 dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); 666 667 //MCM - setting its location (Before/After) blender 668 //set to post blend (true) 669 dcn401_set_mcm_location_post_blend( 670 dc, 671 pipe_ctx, 672 mcm_luts.lut3d_data.mpc_mcm_post_blend); 673 674 //RMCM - 3dLUT+Shaper 675 if (mcm_luts.lut3d_data.rmcm_3dlut_enable) { 676 dcn401_program_rmcm_luts( 677 hubp, 678 pipe_ctx, 679 lut3d_src, 680 &mcm_luts, 681 mpc, 682 lut_bank_a, 683 mpcc_id); 684 } 685 686 /* 1D LUT */ 687 if (mcm_luts.lut1d_func) { 688 memset(&m_lut_params, 0, sizeof(m_lut_params)); 689 if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) 690 m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; 691 else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) { 692 rval = cm3_helper_translate_curve_to_hw_format( 693 mcm_luts.lut1d_func, 694 &dpp_base->regamma_params, false); 695 m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; 696 } 697 if (m_lut_params.pwl) { 698 if (mpc->funcs->populate_lut) 699 mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id); 700 } 701 if (mpc->funcs->program_lut_mode) 702 mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id); 703 } 704 705 /* Shaper */ 706 if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) { 707 memset(&m_lut_params, 0, sizeof(m_lut_params)); 708 if (mcm_luts.shaper->type == TF_TYPE_HWPWL) 709 m_lut_params.pwl = &mcm_luts.shaper->pwl; 710 else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { 711 ASSERT(false); 712 rval = cm3_helper_translate_curve_to_hw_format( 713 mcm_luts.shaper, 714 &dpp_base->regamma_params, true); 715 m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; 716 } 717 if (m_lut_params.pwl) { 718 if (mpc->funcs->mcm.populate_lut) 719 mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id); 720 if (mpc->funcs->program_lut_mode) 721 mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id); 722 } 723 } 724 725 /* 3DLUT */ 726 switch (lut3d_src) { 727 case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: 728 memset(&m_lut_params, 0, sizeof(m_lut_params)); 729 if (hubp->funcs->hubp_enable_3dlut_fl) 730 hubp->funcs->hubp_enable_3dlut_fl(hubp, false); 731 732 if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) { 733 m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d; 734 if (mpc->funcs->populate_lut) 735 mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id); 736 if (mpc->funcs->program_lut_mode) 737 mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, 738 mpcc_id); 739 } 740 break; 741 case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: 742 switch (mcm_luts.lut3d_data.gpu_mem_params.size) { 743 case DC_CM2_GPU_MEM_SIZE_333333: 744 width = hubp_3dlut_fl_width_33; 745 break; 746 case DC_CM2_GPU_MEM_SIZE_171717: 747 width = hubp_3dlut_fl_width_17; 748 break; 749 case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: 750 width = hubp_3dlut_fl_width_transformed; 751 break; 752 } 753 754 //check for support 755 if (mpc->funcs->mcm.is_config_supported && 756 !mpc->funcs->mcm.is_config_supported(width)) 757 break; 758 759 if (mpc->funcs->program_lut_read_write_control) 760 mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id); 761 if (mpc->funcs->program_lut_mode) 762 mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id); 763 764 if (hubp->funcs->hubp_program_3dlut_fl_addr) 765 hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr); 766 767 if (mpc->funcs->mcm.program_bit_depth) 768 mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id); 769 770 switch (mcm_luts.lut3d_data.gpu_mem_params.layout) { 771 case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: 772 mode = hubp_3dlut_fl_mode_native_1; 773 addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 774 break; 775 case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR: 776 mode = hubp_3dlut_fl_mode_native_2; 777 addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 778 break; 779 case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR: 780 mode = hubp_3dlut_fl_mode_transform; 781 addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear; 782 break; 783 default: 784 mode = hubp_3dlut_fl_mode_disable; 785 addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; 786 break; 787 } 788 if (hubp->funcs->hubp_program_3dlut_fl_mode) 789 hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode); 790 791 if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode) 792 hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode); 793 794 switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) { 795 case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB: 796 format = hubp_3dlut_fl_format_unorm_12msb_bitslice; 797 break; 798 case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB: 799 format = hubp_3dlut_fl_format_unorm_12lsb_bitslice; 800 break; 801 case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10: 802 format = hubp_3dlut_fl_format_float_fp1_5_10; 803 break; 804 } 805 if (hubp->funcs->hubp_program_3dlut_fl_format) 806 hubp->funcs->hubp_program_3dlut_fl_format(hubp, format); 807 if (hubp->funcs->hubp_update_3dlut_fl_bias_scale && 808 mpc->funcs->mcm.program_bias_scale) { 809 mpc->funcs->mcm.program_bias_scale(mpc, 810 mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias, 811 mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale, 812 mpcc_id); 813 hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp, 814 mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias, 815 mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale); 816 } 817 818 //navi 4x has a bug and r and blue are swapped and need to be worked around here in 819 //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x 820 dc_get_lut_xbar( 821 mcm_luts.lut3d_data.gpu_mem_params.component_order, 822 &crossbar_bit_slice_cr_r, 823 &crossbar_bit_slice_y_g, 824 &crossbar_bit_slice_cb_b); 825 826 if (hubp->funcs->hubp_program_3dlut_fl_crossbar) 827 hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, 828 crossbar_bit_slice_cr_r, 829 crossbar_bit_slice_y_g, 830 crossbar_bit_slice_cb_b); 831 832 if (mpc->funcs->mcm.program_lut_read_write_control) 833 mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id); 834 835 if (mpc->funcs->mcm.program_3dlut_size) 836 mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id); 837 838 if (mpc->funcs->update_3dlut_fast_load_select) 839 mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst); 840 841 if (hubp->funcs->hubp_enable_3dlut_fl) 842 hubp->funcs->hubp_enable_3dlut_fl(hubp, true); 843 else { 844 if (mpc->funcs->program_lut_mode) { 845 mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id); 846 mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id); 847 mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id); 848 } 849 } 850 break; 851 852 } 853 } 854 855 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx) 856 { 857 struct hubp *hubp = pipe_ctx->plane_res.hubp; 858 859 if (hubp->funcs->hubp_enable_3dlut_fl) { 860 hubp->funcs->hubp_enable_3dlut_fl(hubp, true); 861 } 862 } 863 864 bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx, 865 const struct dc_plane_state *plane_state) 866 { 867 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 868 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 869 struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc; 870 struct mpc *mpc = dc->res_pool->mpc; 871 bool result; 872 const struct pwl_params *lut_params = NULL; 873 bool rval; 874 875 if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { 876 dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a); 877 return true; 878 } 879 880 mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); 881 pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE; 882 // 1D LUT 883 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 884 lut_params = &plane_state->blend_tf.pwl; 885 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 886 rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, 887 &dpp_base->regamma_params, false); 888 lut_params = rval ? &dpp_base->regamma_params : NULL; 889 } 890 result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); 891 lut_params = NULL; 892 893 // Shaper 894 if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) 895 lut_params = &plane_state->in_shaper_func.pwl; 896 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { 897 // TODO: dpp_base replace 898 rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, 899 &dpp_base->shaper_params, true); 900 lut_params = rval ? &dpp_base->shaper_params : NULL; 901 } 902 result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); 903 904 // 3D 905 if (mpc->funcs->program_3dlut) { 906 if (plane_state->lut3d_func.state.bits.initialized == 1) 907 result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id); 908 else 909 result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); 910 } 911 912 return result; 913 } 914 915 bool dcn401_set_output_transfer_func(struct dc *dc, 916 struct pipe_ctx *pipe_ctx, 917 const struct dc_stream_state *stream) 918 { 919 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 920 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 921 const struct pwl_params *params = NULL; 922 bool ret = false; 923 924 /* program OGAM or 3DLUT only for the top pipe*/ 925 if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { 926 /*program shaper and 3dlut in MPC*/ 927 ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); 928 if (ret == false && mpc->funcs->set_output_gamma) { 929 if (stream->out_transfer_func.type == TF_TYPE_HWPWL) 930 params = &stream->out_transfer_func.pwl; 931 else if (pipe_ctx->stream->out_transfer_func.type == 932 TF_TYPE_DISTRIBUTED_POINTS && 933 cm3_helper_translate_curve_to_hw_format( 934 &stream->out_transfer_func, 935 &mpc->blender_params, false)) 936 params = &mpc->blender_params; 937 /* there are no ROM LUTs in OUTGAM */ 938 if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) 939 BREAK_TO_DEBUGGER(); 940 } 941 } 942 943 if (mpc->funcs->set_output_gamma) 944 mpc->funcs->set_output_gamma(mpc, mpcc_id, params); 945 946 return ret; 947 } 948 949 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx, 950 unsigned int *tmds_div) 951 { 952 struct dc_stream_state *stream = pipe_ctx->stream; 953 954 if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { 955 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) 956 *tmds_div = PIXEL_RATE_DIV_BY_2; 957 else 958 *tmds_div = PIXEL_RATE_DIV_BY_4; 959 } else { 960 *tmds_div = PIXEL_RATE_DIV_BY_1; 961 } 962 963 if (*tmds_div == PIXEL_RATE_DIV_NA) 964 ASSERT(false); 965 966 } 967 968 static void enable_stream_timing_calc( 969 struct pipe_ctx *pipe_ctx, 970 struct dc_state *context, 971 struct dc *dc, 972 unsigned int *tmds_div, 973 int *opp_inst, 974 int *opp_cnt, 975 struct pipe_ctx *opp_heads[MAX_PIPES], 976 bool *manual_mode, 977 struct drr_params *params, 978 unsigned int *event_triggers) 979 { 980 struct dc_stream_state *stream = pipe_ctx->stream; 981 int i; 982 983 if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) 984 dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div); 985 986 *opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads); 987 for (i = 0; i < *opp_cnt; i++) 988 opp_inst[i] = opp_heads[i]->stream_res.opp->inst; 989 990 if (dc_is_tmds_signal(stream->signal)) { 991 stream->link->phy_state.symclk_ref_cnts.otg = 1; 992 if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) 993 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; 994 else 995 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON; 996 } 997 998 params->vertical_total_min = stream->adjust.v_total_min; 999 params->vertical_total_max = stream->adjust.v_total_max; 1000 params->vertical_total_mid = stream->adjust.v_total_mid; 1001 params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num; 1002 1003 // DRR should set trigger event to monitor surface update event 1004 if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) 1005 *event_triggers = 0x80; 1006 } 1007 1008 enum dc_status dcn401_enable_stream_timing( 1009 struct pipe_ctx *pipe_ctx, 1010 struct dc_state *context, 1011 struct dc *dc) 1012 { 1013 struct dce_hwseq *hws = dc->hwseq; 1014 struct dc_stream_state *stream = pipe_ctx->stream; 1015 struct drr_params params = {0}; 1016 unsigned int event_triggers = 0; 1017 int opp_cnt = 1; 1018 int opp_inst[MAX_PIPES] = {0}; 1019 struct pipe_ctx *opp_heads[MAX_PIPES] = {0}; 1020 struct dc_crtc_timing patched_crtc_timing = stream->timing; 1021 bool manual_mode = false; 1022 unsigned int tmds_div = PIXEL_RATE_DIV_NA; 1023 unsigned int unused_div = PIXEL_RATE_DIV_NA; 1024 int odm_slice_width; 1025 int last_odm_slice_width; 1026 int i; 1027 1028 if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER)) 1029 return DC_OK; 1030 1031 enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst, 1032 &opp_cnt, opp_heads, &manual_mode, ¶ms, &event_triggers); 1033 1034 if (dc->res_pool->dccg->funcs->set_pixel_rate_div) { 1035 dc->res_pool->dccg->funcs->set_pixel_rate_div( 1036 dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst, 1037 tmds_div, unused_div); 1038 } 1039 1040 /* TODO check if timing_changed, disable stream if timing changed */ 1041 1042 if (opp_cnt > 1) { 1043 odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false); 1044 last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true); 1045 pipe_ctx->stream_res.tg->funcs->set_odm_combine( 1046 pipe_ctx->stream_res.tg, 1047 opp_inst, opp_cnt, 1048 odm_slice_width, last_odm_slice_width); 1049 } 1050 1051 /* set DTBCLK_P */ 1052 if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) { 1053 if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { 1054 dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst); 1055 } 1056 } 1057 1058 /* HW program guide assume display already disable 1059 * by unplug sequence. OTG assume stop. 1060 */ 1061 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true); 1062 1063 if (false == pipe_ctx->clock_source->funcs->program_pix_clk( 1064 pipe_ctx->clock_source, 1065 &pipe_ctx->stream_res.pix_clk_params, 1066 dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings), 1067 &pipe_ctx->pll_settings)) { 1068 BREAK_TO_DEBUGGER(); 1069 return DC_ERROR_UNEXPECTED; 1070 } 1071 1072 if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) 1073 dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); 1074 1075 /* if we are borrowing from hblank, h_addressable needs to be adjusted */ 1076 if (dc->debug.enable_hblank_borrow) 1077 patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow; 1078 1079 pipe_ctx->stream_res.tg->funcs->program_timing( 1080 pipe_ctx->stream_res.tg, 1081 &patched_crtc_timing, 1082 (unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels, 1083 (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines, 1084 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels, 1085 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels, 1086 (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines, 1087 pipe_ctx->stream->signal, 1088 true); 1089 1090 for (i = 0; i < opp_cnt; i++) { 1091 opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control( 1092 opp_heads[i]->stream_res.opp, 1093 true); 1094 opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel( 1095 opp_heads[i]->stream_res.opp, 1096 stream->timing.pixel_encoding, 1097 resource_is_pipe_type(opp_heads[i], OTG_MASTER)); 1098 } 1099 1100 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( 1101 pipe_ctx->stream_res.opp, 1102 true); 1103 1104 hws->funcs.blank_pixel_data(dc, pipe_ctx, true); 1105 1106 /* VTG is within DCHUB command block. DCFCLK is always on */ 1107 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) { 1108 BREAK_TO_DEBUGGER(); 1109 return DC_ERROR_UNEXPECTED; 1110 } 1111 1112 hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); 1113 set_drr_and_clear_adjust_pending(pipe_ctx, stream, ¶ms); 1114 1115 /* Event triggers and num frames initialized for DRR, but can be 1116 * later updated for PSR use. Note DRR trigger events are generated 1117 * regardless of whether num frames met. 1118 */ 1119 if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) 1120 pipe_ctx->stream_res.tg->funcs->set_static_screen_control( 1121 pipe_ctx->stream_res.tg, event_triggers, 2); 1122 1123 /* TODO program crtc source select for non-virtual signal*/ 1124 /* TODO program FMT */ 1125 /* TODO setup link_enc */ 1126 /* TODO set stream attributes */ 1127 /* TODO program audio */ 1128 /* TODO enable stream if timing changed */ 1129 /* TODO unblank stream if DP */ 1130 1131 if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { 1132 if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable) 1133 pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg); 1134 } 1135 1136 return DC_OK; 1137 } 1138 1139 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) 1140 { 1141 switch (link->link_enc->transmitter) { 1142 case TRANSMITTER_UNIPHY_A: 1143 return PHYD32CLKA; 1144 case TRANSMITTER_UNIPHY_B: 1145 return PHYD32CLKB; 1146 case TRANSMITTER_UNIPHY_C: 1147 return PHYD32CLKC; 1148 case TRANSMITTER_UNIPHY_D: 1149 return PHYD32CLKD; 1150 case TRANSMITTER_UNIPHY_E: 1151 return PHYD32CLKE; 1152 default: 1153 return PHYD32CLKA; 1154 } 1155 } 1156 1157 static void dcn401_enable_stream_calc( 1158 struct pipe_ctx *pipe_ctx, 1159 int *dp_hpo_inst, 1160 enum phyd32clk_clock_source *phyd32clk, 1161 unsigned int *tmds_div, 1162 uint32_t *early_control) 1163 { 1164 1165 struct dc *dc = pipe_ctx->stream->ctx->dc; 1166 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 1167 enum dc_lane_count lane_count = 1168 pipe_ctx->stream->link->cur_link_settings.lane_count; 1169 uint32_t active_total_with_borders; 1170 1171 if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) 1172 *dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; 1173 1174 *phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link); 1175 1176 if (dc_is_tmds_signal(pipe_ctx->stream->signal)) 1177 dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div); 1178 else 1179 *tmds_div = PIXEL_RATE_DIV_BY_1; 1180 1181 /* enable early control to avoid corruption on DP monitor*/ 1182 active_total_with_borders = 1183 timing->h_addressable 1184 + timing->h_border_left 1185 + timing->h_border_right; 1186 1187 if (lane_count != 0) 1188 *early_control = active_total_with_borders % lane_count; 1189 1190 if (*early_control == 0) 1191 *early_control = lane_count; 1192 1193 } 1194 1195 void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) 1196 { 1197 uint32_t early_control = 0; 1198 struct timing_generator *tg = pipe_ctx->stream_res.tg; 1199 struct dc_link *link = pipe_ctx->stream->link; 1200 const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 1201 struct dc *dc = pipe_ctx->stream->ctx->dc; 1202 struct dccg *dccg = dc->res_pool->dccg; 1203 enum phyd32clk_clock_source phyd32clk; 1204 int dp_hpo_inst = 0; 1205 unsigned int tmds_div = PIXEL_RATE_DIV_NA; 1206 unsigned int unused_div = PIXEL_RATE_DIV_NA; 1207 struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc; 1208 struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; 1209 1210 if (!dc->config.unify_link_enc_assignment) 1211 link_enc = link_enc_cfg_get_link_enc(link); 1212 1213 dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk, 1214 &tmds_div, &early_control); 1215 1216 if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { 1217 if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { 1218 dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst); 1219 if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { 1220 dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); 1221 } else { 1222 dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 1223 } 1224 } else { 1225 dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, 1226 link_enc->transmitter - TRANSMITTER_UNIPHY_A); 1227 } 1228 } 1229 1230 if (dc->res_pool->dccg->funcs->set_pixel_rate_div) { 1231 dc->res_pool->dccg->funcs->set_pixel_rate_div( 1232 dc->res_pool->dccg, 1233 pipe_ctx->stream_res.tg->inst, 1234 tmds_div, 1235 unused_div); 1236 } 1237 1238 link_hwss->setup_stream_encoder(pipe_ctx); 1239 1240 if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { 1241 if (dc->hwss.program_dmdata_engine) 1242 dc->hwss.program_dmdata_engine(pipe_ctx); 1243 } 1244 1245 dc->hwss.update_info_frame(pipe_ctx); 1246 1247 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 1248 dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 1249 1250 tg->funcs->set_early_control(tg, early_control); 1251 } 1252 1253 void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) 1254 { 1255 REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable); 1256 } 1257 1258 void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy) 1259 { 1260 if (cursor_width <= 128) { 1261 pos_cpy->x_hotspot /= 2; 1262 pos_cpy->x_hotspot += 1; 1263 } else { 1264 pos_cpy->x_hotspot /= 2; 1265 pos_cpy->x_hotspot += 2; 1266 } 1267 } 1268 1269 static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding) 1270 { 1271 struct dc *dc = link->ctx->dc; 1272 struct pipe_ctx *pipe_ctx = NULL; 1273 uint8_t i; 1274 1275 for (i = 0; i < MAX_PIPES; i++) { 1276 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 1277 if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) { 1278 pipe_ctx->clock_source->funcs->program_pix_clk( 1279 pipe_ctx->clock_source, 1280 &pipe_ctx->stream_res.pix_clk_params, 1281 link_encoding, 1282 &pipe_ctx->pll_settings); 1283 break; 1284 } 1285 } 1286 } 1287 1288 void dcn401_disable_link_output(struct dc_link *link, 1289 const struct link_resource *link_res, 1290 enum signal_type signal) 1291 { 1292 struct dc *dc = link->ctx->dc; 1293 const struct link_hwss *link_hwss = get_link_hwss(link, link_res); 1294 struct dmcu *dmcu = dc->res_pool->dmcu; 1295 1296 if (signal == SIGNAL_TYPE_EDP && 1297 link->dc->hwss.edp_backlight_control && 1298 !link->skip_implict_edp_power_control) 1299 link->dc->hwss.edp_backlight_control(link, false); 1300 else if (dmcu != NULL && dmcu->funcs->lock_phy) 1301 dmcu->funcs->lock_phy(dmcu); 1302 1303 if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) { 1304 disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING); 1305 link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; 1306 } else { 1307 link_hwss->disable_link_output(link, link_res, signal); 1308 link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; 1309 } 1310 1311 if (signal == SIGNAL_TYPE_EDP && 1312 link->dc->hwss.edp_backlight_control && 1313 !link->skip_implict_edp_power_control) 1314 link->dc->hwss.edp_power_control(link, false); 1315 else if (dmcu != NULL && dmcu->funcs->lock_phy) 1316 dmcu->funcs->unlock_phy(dmcu); 1317 1318 dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); 1319 } 1320 1321 void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx) 1322 { 1323 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; 1324 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1325 struct dpp *dpp = pipe_ctx->plane_res.dpp; 1326 struct dc_cursor_mi_param param = { 1327 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10, 1328 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz, 1329 .viewport = pipe_ctx->plane_res.scl_data.viewport, 1330 .recout = pipe_ctx->plane_res.scl_data.recout, 1331 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz, 1332 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert, 1333 .rotation = pipe_ctx->plane_state->rotation, 1334 .mirror = pipe_ctx->plane_state->horizontal_mirror, 1335 .stream = pipe_ctx->stream 1336 }; 1337 struct rect odm_slice_src = { 0 }; 1338 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) || 1339 (pipe_ctx->prev_odm_pipe != NULL); 1340 int prev_odm_width = 0; 1341 struct pipe_ctx *prev_odm_pipe = NULL; 1342 bool mpc_combine_on = false; 1343 int bottom_pipe_x_pos = 0; 1344 1345 int x_pos = pos_cpy.x; 1346 int y_pos = pos_cpy.y; 1347 int recout_x_pos = 0; 1348 int recout_y_pos = 0; 1349 1350 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) { 1351 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) || 1352 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) { 1353 mpc_combine_on = true; 1354 } 1355 } 1356 1357 /* DCN4 moved cursor composition after Scaler, so in HW it is in 1358 * recout space and for HW Cursor position programming need to 1359 * translate to recout space. 1360 * 1361 * Cursor X and Y position programmed into HW can't be negative, 1362 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot 1363 * position that goes into HW X and Y coordinates while HW Hot spot 1364 * X and Y coordinates are length relative to the cursor top left 1365 * corner, hotspot must be smaller than the cursor size. 1366 * 1367 * DMs/DC interface for Cursor position is in stream->src space, and 1368 * DMs supposed to transform Cursor coordinates to stream->src space, 1369 * then here we need to translate Cursor coordinates to stream->dst 1370 * space, as now in HW, Cursor coordinates are in per pipe recout 1371 * space, and for the given pipe valid coordinates are only in range 1372 * from 0,0 - recout width, recout height space. 1373 * If certain pipe combining is in place, need to further adjust per 1374 * pipe to make sure each pipe enabling cursor on its part of the 1375 * screen. 1376 */ 1377 x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width / 1378 pipe_ctx->stream->src.width; 1379 y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height / 1380 pipe_ctx->stream->src.height; 1381 1382 /* If the cursor's source viewport is clipped then we need to 1383 * translate the cursor to appear in the correct position on 1384 * the screen. 1385 * 1386 * This translation isn't affected by scaling so it needs to be 1387 * done *after* we adjust the position for the scale factor. 1388 * 1389 * This is only done by opt-in for now since there are still 1390 * some usecases like tiled display that might enable the 1391 * cursor on both streams while expecting dc to clip it. 1392 */ 1393 if (pos_cpy.translate_by_source) { 1394 x_pos += pipe_ctx->plane_state->src_rect.x; 1395 y_pos += pipe_ctx->plane_state->src_rect.y; 1396 } 1397 1398 /* Adjust for ODM Combine 1399 * next/prev_odm_offset is to account for scaled modes that have underscan 1400 */ 1401 if (odm_combine_on) { 1402 prev_odm_pipe = pipe_ctx->prev_odm_pipe; 1403 1404 while (prev_odm_pipe != NULL) { 1405 odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe); 1406 prev_odm_width += odm_slice_src.width; 1407 prev_odm_pipe = prev_odm_pipe->prev_odm_pipe; 1408 } 1409 1410 x_pos -= (prev_odm_width); 1411 } 1412 1413 /* If the position is negative then we need to add to the hotspot 1414 * to fix cursor size between ODM slices 1415 */ 1416 1417 if (x_pos < 0) { 1418 pos_cpy.x_hotspot -= x_pos; 1419 if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION) 1420 adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy); 1421 x_pos = 0; 1422 } 1423 1424 if (y_pos < 0) { 1425 pos_cpy.y_hotspot -= y_pos; 1426 y_pos = 0; 1427 } 1428 1429 /* If the position on bottom MPC pipe is negative then we need to add to the hotspot and 1430 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices. 1431 */ 1432 if (mpc_combine_on && 1433 pipe_ctx->top_pipe && 1434 (pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) { 1435 1436 bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x; 1437 if (bottom_pipe_x_pos < 0) { 1438 x_pos = pipe_ctx->plane_res.scl_data.recout.x; 1439 pos_cpy.x_hotspot -= bottom_pipe_x_pos; 1440 if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION) 1441 adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy); 1442 } 1443 } 1444 1445 pos_cpy.x = (uint32_t)x_pos; 1446 pos_cpy.y = (uint32_t)y_pos; 1447 1448 if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx)) 1449 pos_cpy.enable = false; 1450 1451 x_pos = pos_cpy.x - param.recout.x; 1452 y_pos = pos_cpy.y - param.recout.y; 1453 1454 recout_x_pos = x_pos - pos_cpy.x_hotspot; 1455 recout_y_pos = y_pos - pos_cpy.y_hotspot; 1456 1457 if (recout_x_pos >= (int)param.recout.width) 1458 pos_cpy.enable = false; /* not visible beyond right edge*/ 1459 1460 if (recout_y_pos >= (int)param.recout.height) 1461 pos_cpy.enable = false; /* not visible beyond bottom edge*/ 1462 1463 if (recout_x_pos + (int)hubp->curs_attr.width <= 0) 1464 pos_cpy.enable = false; /* not visible beyond left edge*/ 1465 1466 if (recout_y_pos + (int)hubp->curs_attr.height <= 0) 1467 pos_cpy.enable = false; /* not visible beyond top edge*/ 1468 1469 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m); 1470 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height); 1471 } 1472 1473 static bool dcn401_check_no_memory_request_for_cab(struct dc *dc) 1474 { 1475 int i; 1476 1477 /* First, check no-memory-request case */ 1478 for (i = 0; i < dc->current_state->stream_count; i++) { 1479 if ((dc->current_state->stream_status[i].plane_count) && 1480 (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)) 1481 /* Fail eligibility on a visible stream */ 1482 return false; 1483 } 1484 1485 return true; 1486 } 1487 1488 static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) 1489 { 1490 int i; 1491 uint8_t num_ways = 0; 1492 uint32_t mall_ss_size_bytes = 0; 1493 1494 mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; 1495 // TODO add additional logic for PSR active stream exclusion optimization 1496 // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes; 1497 1498 // Include cursor size for CAB allocation 1499 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1500 struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i]; 1501 1502 if (!pipe->stream || !pipe->plane_state) 1503 continue; 1504 1505 mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false); 1506 } 1507 1508 // Convert number of cache lines required to number of ways 1509 if (dc->debug.force_mall_ss_num_ways > 0) 1510 num_ways = dc->debug.force_mall_ss_num_ways; 1511 else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) 1512 num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes); 1513 else 1514 num_ways = 0; 1515 1516 return num_ways; 1517 } 1518 1519 bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable) 1520 { 1521 union dmub_rb_cmd cmd; 1522 uint8_t ways, i; 1523 int j; 1524 bool mall_ss_unsupported = false; 1525 struct dc_plane_state *plane = NULL; 1526 1527 if (!dc->ctx->dmub_srv || !dc->current_state) 1528 return false; 1529 1530 for (i = 0; i < dc->current_state->stream_count; i++) { 1531 /* MALL SS messaging is not supported with PSR at this time */ 1532 if (dc->current_state->streams[i] != NULL && 1533 dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { 1534 DC_LOG_MALL("MALL SS not supported with PSR at this time\n"); 1535 return false; 1536 } 1537 } 1538 1539 memset(&cmd, 0, sizeof(cmd)); 1540 cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; 1541 cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); 1542 1543 if (enable) { 1544 if (dcn401_check_no_memory_request_for_cab(dc)) { 1545 /* 1. Check no memory request case for CAB. 1546 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message 1547 */ 1548 DC_LOG_MALL("sending CAB action NO_DCN_REQ\n"); 1549 cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; 1550 } else { 1551 /* 2. Check if all surfaces can fit in CAB. 1552 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message 1553 * and configure HUBP's to fetch from MALL 1554 */ 1555 ways = dcn401_calculate_cab_allocation(dc, dc->current_state); 1556 1557 /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo, 1558 * or TMZ surface, don't try to enter MALL. 1559 */ 1560 for (i = 0; i < dc->current_state->stream_count; i++) { 1561 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { 1562 plane = dc->current_state->stream_status[i].plane_states[j]; 1563 1564 if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO || 1565 plane->address.tmz_surface) { 1566 mall_ss_unsupported = true; 1567 break; 1568 } 1569 } 1570 if (mall_ss_unsupported) 1571 break; 1572 } 1573 if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) { 1574 cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB; 1575 cmd.cab.cab_alloc_ways = ways; 1576 DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways); 1577 } else { 1578 cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB; 1579 DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways); 1580 } 1581 } 1582 } else { 1583 /* Disable CAB */ 1584 cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION; 1585 DC_LOG_MALL("idle optimization disabled\n"); 1586 } 1587 1588 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 1589 1590 return true; 1591 } 1592 1593 void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc, 1594 const struct pipe_ctx *top_pipe) 1595 { 1596 bool is_wait_needed = false; 1597 const struct pipe_ctx *pipe_ctx = top_pipe; 1598 1599 /* check if any surfaces are updating address while using flip immediate and dcc */ 1600 while (pipe_ctx != NULL) { 1601 if (pipe_ctx->plane_state && 1602 pipe_ctx->plane_state->dcc.enable && 1603 pipe_ctx->plane_state->flip_immediate && 1604 pipe_ctx->plane_state->update_flags.bits.addr_update) { 1605 is_wait_needed = true; 1606 break; 1607 } 1608 1609 /* check next pipe */ 1610 pipe_ctx = pipe_ctx->bottom_pipe; 1611 } 1612 1613 if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) { 1614 udelay(dc->debug.dcc_meta_propagation_delay_us); 1615 } 1616 } 1617 1618 void dcn401_prepare_bandwidth(struct dc *dc, 1619 struct dc_state *context) 1620 { 1621 struct hubbub *hubbub = dc->res_pool->hubbub; 1622 bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; 1623 unsigned int compbuf_size = 0; 1624 1625 /* Any transition into P-State support should disable MCLK switching first to avoid hangs */ 1626 if (p_state_change_support) { 1627 dc->optimized_required = true; 1628 context->bw_ctx.bw.dcn.clk.p_state_change_support = false; 1629 } 1630 1631 if (dc->clk_mgr->dc_mode_softmax_enabled) 1632 if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && 1633 context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) 1634 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); 1635 1636 /* Increase clocks */ 1637 dc->clk_mgr->funcs->update_clocks( 1638 dc->clk_mgr, 1639 context, 1640 false); 1641 1642 /* program dchubbub watermarks: 1643 * For assigning wm_optimized_required, use |= operator since we don't want 1644 * to clear the value if the optimize has not happened yet 1645 */ 1646 dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub, 1647 &context->bw_ctx.bw.dcn.watermarks, 1648 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 1649 false); 1650 /* update timeout thresholds */ 1651 if (hubbub->funcs->program_arbiter) { 1652 dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false); 1653 } 1654 1655 /* decrease compbuf size */ 1656 if (hubbub->funcs->program_compbuf_segments) { 1657 compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size; 1658 dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size); 1659 1660 hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false); 1661 } 1662 1663 if (dc->debug.fams2_config.bits.enable) { 1664 dcn401_fams2_global_control_lock(dc, context, true); 1665 dcn401_fams2_update_config(dc, context, false); 1666 dcn401_fams2_global_control_lock(dc, context, false); 1667 } 1668 1669 if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) { 1670 /* After disabling P-State, restore the original value to ensure we get the correct P-State 1671 * on the next optimize. */ 1672 context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; 1673 } 1674 } 1675 1676 void dcn401_optimize_bandwidth( 1677 struct dc *dc, 1678 struct dc_state *context) 1679 { 1680 int i; 1681 struct hubbub *hubbub = dc->res_pool->hubbub; 1682 1683 /* enable fams2 if needed */ 1684 if (dc->debug.fams2_config.bits.enable) { 1685 dcn401_fams2_global_control_lock(dc, context, true); 1686 dcn401_fams2_update_config(dc, context, true); 1687 dcn401_fams2_global_control_lock(dc, context, false); 1688 } 1689 1690 /* program dchubbub watermarks */ 1691 hubbub->funcs->program_watermarks(hubbub, 1692 &context->bw_ctx.bw.dcn.watermarks, 1693 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 1694 true); 1695 /* update timeout thresholds */ 1696 if (hubbub->funcs->program_arbiter) { 1697 hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true); 1698 } 1699 1700 if (dc->clk_mgr->dc_mode_softmax_enabled) 1701 if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && 1702 context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) 1703 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk); 1704 1705 /* increase compbuf size */ 1706 if (hubbub->funcs->program_compbuf_segments) 1707 hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true); 1708 1709 dc->clk_mgr->funcs->update_clocks( 1710 dc->clk_mgr, 1711 context, 1712 true); 1713 if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) { 1714 for (i = 0; i < dc->res_pool->pipe_count; ++i) { 1715 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1716 1717 if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank 1718 && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max 1719 && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total) 1720 pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp, 1721 pipe_ctx->dlg_regs.min_dst_y_next_start); 1722 } 1723 } 1724 } 1725 1726 void dcn401_fams2_global_control_lock(struct dc *dc, 1727 struct dc_state *context, 1728 bool lock) 1729 { 1730 /* use always for now */ 1731 union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; 1732 1733 if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable) 1734 return; 1735 1736 hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; 1737 hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; 1738 hw_lock_cmd.bits.lock = lock; 1739 hw_lock_cmd.bits.should_release = !lock; 1740 dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); 1741 } 1742 1743 void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params) 1744 { 1745 struct dc *dc = params->fams2_global_control_lock_fast_params.dc; 1746 bool lock = params->fams2_global_control_lock_fast_params.lock; 1747 1748 if (params->fams2_global_control_lock_fast_params.is_required) { 1749 union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; 1750 1751 hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; 1752 hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; 1753 hw_lock_cmd.bits.lock = lock; 1754 hw_lock_cmd.bits.should_release = !lock; 1755 dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); 1756 } 1757 } 1758 1759 void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable) 1760 { 1761 bool fams2_required; 1762 1763 if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable) 1764 return; 1765 1766 fams2_required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable; 1767 1768 dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_required); 1769 } 1770 1771 static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context, 1772 struct pipe_ctx *otg_master) 1773 { 1774 int i; 1775 struct pipe_ctx *old_pipe; 1776 struct pipe_ctx *new_pipe; 1777 struct pipe_ctx *old_opp_heads[MAX_PIPES]; 1778 struct pipe_ctx *old_otg_master; 1779 int old_opp_head_count = 0; 1780 1781 old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx]; 1782 1783 if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) { 1784 old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master, 1785 &dc->current_state->res_ctx, 1786 old_opp_heads); 1787 } else { 1788 // DC cannot assume that the current state and the new state 1789 // share the same OTG pipe since this is not true when called 1790 // in the context of a commit stream not checked. Hence, set 1791 // old_otg_master to NULL to skip the DSC configuration. 1792 old_otg_master = NULL; 1793 } 1794 1795 1796 if (otg_master->stream_res.dsc) 1797 dcn32_update_dsc_on_stream(otg_master, 1798 otg_master->stream->timing.flags.DSC); 1799 if (old_otg_master && old_otg_master->stream_res.dsc) { 1800 for (i = 0; i < old_opp_head_count; i++) { 1801 old_pipe = old_opp_heads[i]; 1802 new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx]; 1803 if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) 1804 old_pipe->stream_res.dsc->funcs->dsc_disconnect( 1805 old_pipe->stream_res.dsc); 1806 } 1807 } 1808 } 1809 1810 void dcn401_update_odm(struct dc *dc, struct dc_state *context, 1811 struct pipe_ctx *otg_master) 1812 { 1813 struct pipe_ctx *opp_heads[MAX_PIPES]; 1814 int opp_inst[MAX_PIPES] = {0}; 1815 int opp_head_count; 1816 int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false); 1817 int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true); 1818 int i; 1819 1820 opp_head_count = resource_get_opp_heads_for_otg_master( 1821 otg_master, &context->res_ctx, opp_heads); 1822 1823 for (i = 0; i < opp_head_count; i++) 1824 opp_inst[i] = opp_heads[i]->stream_res.opp->inst; 1825 if (opp_head_count > 1) 1826 otg_master->stream_res.tg->funcs->set_odm_combine( 1827 otg_master->stream_res.tg, 1828 opp_inst, opp_head_count, 1829 odm_slice_width, last_odm_slice_width); 1830 else 1831 otg_master->stream_res.tg->funcs->set_odm_bypass( 1832 otg_master->stream_res.tg, 1833 &otg_master->stream->timing); 1834 1835 for (i = 0; i < opp_head_count; i++) { 1836 opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control( 1837 opp_heads[i]->stream_res.opp, 1838 true); 1839 opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel( 1840 opp_heads[i]->stream_res.opp, 1841 opp_heads[i]->stream->timing.pixel_encoding, 1842 resource_is_pipe_type(opp_heads[i], OTG_MASTER)); 1843 } 1844 1845 update_dsc_for_odm_change(dc, context, otg_master); 1846 1847 if (!resource_is_pipe_type(otg_master, DPP_PIPE)) 1848 /* 1849 * blank pattern is generated by OPP, reprogram blank pattern 1850 * due to OPP count change 1851 */ 1852 dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true); 1853 } 1854 1855 void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, 1856 struct dc_link_settings *link_settings) 1857 { 1858 struct encoder_unblank_param params = {0}; 1859 struct dc_stream_state *stream = pipe_ctx->stream; 1860 struct dc_link *link = stream->link; 1861 struct dce_hwseq *hws = link->dc->hwseq; 1862 1863 /* calculate parameters for unblank */ 1864 params.opp_cnt = resource_get_odm_slice_count(pipe_ctx); 1865 1866 params.timing = pipe_ctx->stream->timing; 1867 params.link_settings.link_rate = link_settings->link_rate; 1868 params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle; 1869 1870 if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { 1871 pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( 1872 pipe_ctx->stream_res.hpo_dp_stream_enc, 1873 pipe_ctx->stream_res.tg->inst); 1874 } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { 1875 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); 1876 } 1877 1878 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) 1879 hws->funcs.edp_backlight_control(link, true); 1880 } 1881 1882 void dcn401_hardware_release(struct dc *dc) 1883 { 1884 dc_dmub_srv_fams2_update_config(dc, dc->current_state, false); 1885 1886 /* If pstate unsupported, or still supported 1887 * by firmware, force it supported by dcn 1888 */ 1889 if (dc->current_state) { 1890 if ((!dc->clk_mgr->clks.p_state_change_support || 1891 dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) && 1892 dc->res_pool->hubbub->funcs->force_pstate_change_control) 1893 dc->res_pool->hubbub->funcs->force_pstate_change_control( 1894 dc->res_pool->hubbub, true, true); 1895 1896 dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true; 1897 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true); 1898 } 1899 } 1900 1901 void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master) 1902 { 1903 struct pipe_ctx *opp_heads[MAX_PIPES]; 1904 struct pipe_ctx *dpp_pipes[MAX_PIPES]; 1905 struct hubbub *hubbub = dc->res_pool->hubbub; 1906 int dpp_count = 0; 1907 1908 if (!otg_master->stream) 1909 return; 1910 1911 int slice_count = resource_get_opp_heads_for_otg_master(otg_master, 1912 &context->res_ctx, opp_heads); 1913 1914 for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) { 1915 if (opp_heads[slice_idx]->plane_state) { 1916 dpp_count = resource_get_dpp_pipes_for_opp_head( 1917 opp_heads[slice_idx], 1918 &context->res_ctx, 1919 dpp_pipes); 1920 for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) { 1921 struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx]; 1922 if (dpp_pipe && hubbub && 1923 dpp_pipe->plane_res.hubp && 1924 hubbub->funcs->wait_for_det_update) 1925 hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst); 1926 } 1927 } else { 1928 if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update) 1929 hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst); 1930 } 1931 } 1932 } 1933 1934 void dcn401_interdependent_update_lock(struct dc *dc, 1935 struct dc_state *context, bool lock) 1936 { 1937 unsigned int i = 0; 1938 struct pipe_ctx *pipe = NULL; 1939 struct timing_generator *tg = NULL; 1940 1941 if (lock) { 1942 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1943 pipe = &context->res_ctx.pipe_ctx[i]; 1944 tg = pipe->stream_res.tg; 1945 1946 if (!resource_is_pipe_type(pipe, OTG_MASTER) || 1947 !tg->funcs->is_tg_enabled(tg) || 1948 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) 1949 continue; 1950 dc->hwss.pipe_control_lock(dc, pipe, true); 1951 } 1952 } else { 1953 /* Need to free DET being used first and have pipe update, then unlock the remaining pipes*/ 1954 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1955 pipe = &context->res_ctx.pipe_ctx[i]; 1956 tg = pipe->stream_res.tg; 1957 1958 if (!resource_is_pipe_type(pipe, OTG_MASTER) || 1959 !tg->funcs->is_tg_enabled(tg) || 1960 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 1961 continue; 1962 } 1963 1964 if (dc->scratch.pipes_to_unlock_first[i]) { 1965 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1966 dc->hwss.pipe_control_lock(dc, pipe, false); 1967 /* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe*/ 1968 dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe); 1969 } 1970 } 1971 1972 /* Unlocking the rest of the pipes */ 1973 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1974 if (dc->scratch.pipes_to_unlock_first[i]) 1975 continue; 1976 1977 pipe = &context->res_ctx.pipe_ctx[i]; 1978 tg = pipe->stream_res.tg; 1979 if (!resource_is_pipe_type(pipe, OTG_MASTER) || 1980 !tg->funcs->is_tg_enabled(tg) || 1981 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 1982 continue; 1983 } 1984 1985 dc->hwss.pipe_control_lock(dc, pipe, false); 1986 } 1987 } 1988 } 1989 1990 void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx) 1991 { 1992 /* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that 1993 * HUBP will properly fetch 3DLUT contents after unlock. 1994 * 1995 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless 1996 * of whether OTG lock is currently being held or not. 1997 */ 1998 struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL }; 1999 struct pipe_ctx *odm_pipe, *mpc_pipe; 2000 int i, wa_pipe_ct = 0; 2001 2002 for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) { 2003 for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) { 2004 if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src 2005 == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM 2006 && mpc_pipe->plane_state->mcm_shaper_3dlut_setting 2007 == DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) { 2008 wa_pipes[wa_pipe_ct++] = mpc_pipe; 2009 } 2010 } 2011 } 2012 2013 if (wa_pipe_ct > 0) { 2014 if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) 2015 pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true); 2016 2017 for (i = 0; i < wa_pipe_ct; ++i) { 2018 if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) 2019 wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); 2020 } 2021 2022 pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); 2023 if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status) 2024 pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false); 2025 2026 for (i = 0; i < wa_pipe_ct; ++i) { 2027 if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) 2028 wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); 2029 } 2030 2031 if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) 2032 pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false); 2033 } else { 2034 pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); 2035 } 2036 } 2037 2038 void dcn401_program_outstanding_updates(struct dc *dc, 2039 struct dc_state *context) 2040 { 2041 struct hubbub *hubbub = dc->res_pool->hubbub; 2042 2043 /* update compbuf if required */ 2044 if (hubbub->funcs->program_compbuf_segments) 2045 hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true); 2046 } 2047 2048 void dcn401_reset_back_end_for_pipe( 2049 struct dc *dc, 2050 struct pipe_ctx *pipe_ctx, 2051 struct dc_state *context) 2052 { 2053 struct dc_link *link = pipe_ctx->stream->link; 2054 const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 2055 2056 DC_LOGGER_INIT(dc->ctx->logger); 2057 if (pipe_ctx->stream_res.stream_enc == NULL) { 2058 pipe_ctx->stream = NULL; 2059 return; 2060 } 2061 2062 /* DPMS may already disable or */ 2063 /* dpms_off status is incorrect due to fastboot 2064 * feature. When system resume from S4 with second 2065 * screen only, the dpms_off would be true but 2066 * VBIOS lit up eDP, so check link status too. 2067 */ 2068 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) 2069 dc->link_srv->set_dpms_off(pipe_ctx); 2070 else if (pipe_ctx->stream_res.audio) 2071 dc->hwss.disable_audio_stream(pipe_ctx); 2072 2073 /* free acquired resources */ 2074 if (pipe_ctx->stream_res.audio) { 2075 /*disable az_endpoint*/ 2076 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 2077 2078 /*free audio*/ 2079 if (dc->caps.dynamic_audio == true) { 2080 /*we have to dynamic arbitrate the audio endpoints*/ 2081 /*we free the resource, need reset is_audio_acquired*/ 2082 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, 2083 pipe_ctx->stream_res.audio, false); 2084 pipe_ctx->stream_res.audio = NULL; 2085 } 2086 } 2087 2088 /* by upper caller loop, parent pipe: pipe0, will be reset last. 2089 * back end share by all pipes and will be disable only when disable 2090 * parent pipe. 2091 */ 2092 if (pipe_ctx->top_pipe == NULL) { 2093 2094 dc->hwss.set_abm_immediate_disable(pipe_ctx); 2095 2096 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); 2097 2098 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); 2099 if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) 2100 pipe_ctx->stream_res.tg->funcs->set_odm_bypass( 2101 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 2102 2103 set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL); 2104 2105 /* TODO - convert symclk_ref_cnts for otg to a bit map to solve 2106 * the case where the same symclk is shared across multiple otg 2107 * instances 2108 */ 2109 if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) 2110 link->phy_state.symclk_ref_cnts.otg = 0; 2111 if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) { 2112 link_hwss->disable_link_output(link, 2113 &pipe_ctx->link_res, pipe_ctx->stream->signal); 2114 link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; 2115 } 2116 2117 /* reset DTBCLK_P */ 2118 if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) 2119 dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst); 2120 } 2121 2122 /* 2123 * In case of a dangling plane, setting this to NULL unconditionally 2124 * causes failures during reset hw ctx where, if stream is NULL, 2125 * it is expected that the pipe_ctx pointers to pipes and plane are NULL. 2126 */ 2127 pipe_ctx->stream = NULL; 2128 pipe_ctx->top_pipe = NULL; 2129 pipe_ctx->bottom_pipe = NULL; 2130 pipe_ctx->next_odm_pipe = NULL; 2131 pipe_ctx->prev_odm_pipe = NULL; 2132 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", 2133 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); 2134 } 2135 2136 void dcn401_reset_hw_ctx_wrap( 2137 struct dc *dc, 2138 struct dc_state *context) 2139 { 2140 int i; 2141 struct dce_hwseq *hws = dc->hwseq; 2142 2143 /* Reset Back End*/ 2144 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { 2145 struct pipe_ctx *pipe_ctx_old = 2146 &dc->current_state->res_ctx.pipe_ctx[i]; 2147 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2148 2149 if (!pipe_ctx_old->stream) 2150 continue; 2151 2152 if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) 2153 continue; 2154 2155 if (!pipe_ctx->stream || 2156 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { 2157 struct clock_source *old_clk = pipe_ctx_old->clock_source; 2158 2159 if (hws->funcs.reset_back_end_for_pipe) 2160 hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); 2161 if (hws->funcs.enable_stream_gating) 2162 hws->funcs.enable_stream_gating(dc, pipe_ctx_old); 2163 if (old_clk) 2164 old_clk->funcs->cs_power_down(old_clk); 2165 } 2166 } 2167 } 2168 2169 static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe) 2170 { 2171 struct pipe_ctx *other_pipe; 2172 unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels; 2173 2174 /* Always use the largest vready_offset of all connected pipes */ 2175 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { 2176 if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset) 2177 vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels; 2178 } 2179 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { 2180 if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset) 2181 vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels; 2182 } 2183 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { 2184 if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset) 2185 vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels; 2186 } 2187 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { 2188 if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset) 2189 vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels; 2190 } 2191 2192 return vready_offset; 2193 } 2194 2195 static void dcn401_program_tg( 2196 struct dc *dc, 2197 struct pipe_ctx *pipe_ctx, 2198 struct dc_state *context, 2199 struct dce_hwseq *hws) 2200 { 2201 pipe_ctx->stream_res.tg->funcs->program_global_sync( 2202 pipe_ctx->stream_res.tg, 2203 dcn401_calculate_vready_offset_for_group(pipe_ctx), 2204 (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines, 2205 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels, 2206 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels, 2207 (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines); 2208 2209 if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) 2210 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); 2211 2212 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 2213 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); 2214 2215 if (hws->funcs.setup_vupdate_interrupt) 2216 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); 2217 } 2218 2219 void dcn401_program_pipe( 2220 struct dc *dc, 2221 struct pipe_ctx *pipe_ctx, 2222 struct dc_state *context) 2223 { 2224 struct dce_hwseq *hws = dc->hwseq; 2225 2226 /* Only need to unblank on top pipe */ 2227 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) { 2228 if (pipe_ctx->update_flags.bits.enable || 2229 pipe_ctx->update_flags.bits.odm || 2230 pipe_ctx->stream->update_flags.bits.abm_level) 2231 hws->funcs.blank_pixel_data(dc, pipe_ctx, 2232 !pipe_ctx->plane_state || 2233 !pipe_ctx->plane_state->visible); 2234 } 2235 2236 /* Only update TG on top pipe */ 2237 if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe 2238 && !pipe_ctx->prev_odm_pipe) 2239 dcn401_program_tg(dc, pipe_ctx, context, hws); 2240 2241 if (pipe_ctx->update_flags.bits.odm) 2242 hws->funcs.update_odm(dc, context, pipe_ctx); 2243 2244 if (pipe_ctx->update_flags.bits.enable) { 2245 if (hws->funcs.enable_plane) 2246 hws->funcs.enable_plane(dc, pipe_ctx, context); 2247 else 2248 dc->hwss.enable_plane(dc, pipe_ctx, context); 2249 2250 if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes) 2251 dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub); 2252 } 2253 2254 if (pipe_ctx->update_flags.bits.det_size) { 2255 if (dc->res_pool->hubbub->funcs->program_det_size) 2256 dc->res_pool->hubbub->funcs->program_det_size( 2257 dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); 2258 if (dc->res_pool->hubbub->funcs->program_det_segments) 2259 dc->res_pool->hubbub->funcs->program_det_segments( 2260 dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); 2261 } 2262 2263 if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || 2264 pipe_ctx->plane_state->update_flags.raw || 2265 pipe_ctx->stream->update_flags.raw)) 2266 dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context); 2267 2268 if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || 2269 pipe_ctx->plane_state->update_flags.bits.hdr_mult)) 2270 hws->funcs.set_hdr_multiplier(pipe_ctx); 2271 2272 if (hws->funcs.populate_mcm_luts) { 2273 if (pipe_ctx->plane_state) { 2274 hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, 2275 pipe_ctx->plane_state->lut_bank_a); 2276 pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; 2277 } 2278 } 2279 2280 if (pipe_ctx->plane_state && 2281 (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || 2282 pipe_ctx->plane_state->update_flags.bits.gamma_change || 2283 pipe_ctx->plane_state->update_flags.bits.lut_3d || 2284 pipe_ctx->update_flags.bits.enable)) 2285 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); 2286 2287 /* dcn10_translate_regamma_to_hw_format takes 750us to finish 2288 * only do gamma programming for powering on, internal memcmp to avoid 2289 * updating on slave planes 2290 */ 2291 if (pipe_ctx->update_flags.bits.enable || 2292 pipe_ctx->update_flags.bits.plane_changed || 2293 pipe_ctx->stream->update_flags.bits.out_tf || 2294 (pipe_ctx->plane_state && 2295 pipe_ctx->plane_state->update_flags.bits.output_tf_change)) 2296 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); 2297 2298 /* If the pipe has been enabled or has a different opp, we 2299 * should reprogram the fmt. This deals with cases where 2300 * interation between mpc and odm combine on different streams 2301 * causes a different pipe to be chosen to odm combine with. 2302 */ 2303 if (pipe_ctx->update_flags.bits.enable 2304 || pipe_ctx->update_flags.bits.opp_changed) { 2305 2306 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 2307 pipe_ctx->stream_res.opp, 2308 COLOR_SPACE_YCBCR601, 2309 pipe_ctx->stream->timing.display_color_depth, 2310 pipe_ctx->stream->signal); 2311 2312 pipe_ctx->stream_res.opp->funcs->opp_program_fmt( 2313 pipe_ctx->stream_res.opp, 2314 &pipe_ctx->stream->bit_depth_params, 2315 &pipe_ctx->stream->clamping); 2316 } 2317 2318 /* Set ABM pipe after other pipe configurations done */ 2319 if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) { 2320 if (pipe_ctx->stream_res.abm) { 2321 dc->hwss.set_pipe(pipe_ctx); 2322 pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm, 2323 pipe_ctx->stream->abm_level); 2324 } 2325 } 2326 2327 if (pipe_ctx->update_flags.bits.test_pattern_changed) { 2328 struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp; 2329 struct bit_depth_reduction_params params; 2330 2331 memset(¶ms, 0, sizeof(params)); 2332 odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); 2333 dc->hwss.set_disp_pattern_generator(dc, 2334 pipe_ctx, 2335 pipe_ctx->stream_res.test_pattern_params.test_pattern, 2336 pipe_ctx->stream_res.test_pattern_params.color_space, 2337 pipe_ctx->stream_res.test_pattern_params.color_depth, 2338 NULL, 2339 pipe_ctx->stream_res.test_pattern_params.width, 2340 pipe_ctx->stream_res.test_pattern_params.height, 2341 pipe_ctx->stream_res.test_pattern_params.offset); 2342 } 2343 } 2344 2345 void dcn401_program_front_end_for_ctx( 2346 struct dc *dc, 2347 struct dc_state *context) 2348 { 2349 int i; 2350 unsigned int prev_hubp_count = 0; 2351 unsigned int hubp_count = 0; 2352 struct dce_hwseq *hws = dc->hwseq; 2353 struct pipe_ctx *pipe = NULL; 2354 2355 DC_LOGGER_INIT(dc->ctx->logger); 2356 2357 if (resource_is_pipe_topology_changed(dc->current_state, context)) 2358 resource_log_pipe_topology_update(dc, context); 2359 2360 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 2361 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2362 pipe = &context->res_ctx.pipe_ctx[i]; 2363 2364 if (pipe->plane_state) { 2365 if (pipe->plane_state->triplebuffer_flips) 2366 BREAK_TO_DEBUGGER(); 2367 2368 /*turn off triple buffer for full update*/ 2369 dc->hwss.program_triplebuffer( 2370 dc, pipe, pipe->plane_state->triplebuffer_flips); 2371 } 2372 } 2373 } 2374 2375 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2376 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state) 2377 prev_hubp_count++; 2378 if (context->res_ctx.pipe_ctx[i].plane_state) 2379 hubp_count++; 2380 } 2381 2382 if (prev_hubp_count == 0 && hubp_count > 0) { 2383 if (dc->res_pool->hubbub->funcs->force_pstate_change_control) 2384 dc->res_pool->hubbub->funcs->force_pstate_change_control( 2385 dc->res_pool->hubbub, true, false); 2386 udelay(500); 2387 } 2388 2389 /* Set pipe update flags and lock pipes */ 2390 for (i = 0; i < dc->res_pool->pipe_count; i++) 2391 dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i], 2392 &context->res_ctx.pipe_ctx[i]); 2393 2394 /* When disabling phantom pipes, turn on phantom OTG first (so we can get double 2395 * buffer updates properly) 2396 */ 2397 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2398 struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; 2399 2400 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2401 2402 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && 2403 dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { 2404 struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; 2405 2406 if (tg->funcs->enable_crtc) { 2407 if (dc->hwseq->funcs.blank_pixel_data) 2408 dc->hwseq->funcs.blank_pixel_data(dc, pipe, true); 2409 2410 tg->funcs->enable_crtc(tg); 2411 } 2412 } 2413 } 2414 /* OTG blank before disabling all front ends */ 2415 for (i = 0; i < dc->res_pool->pipe_count; i++) 2416 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable 2417 && !context->res_ctx.pipe_ctx[i].top_pipe 2418 && !context->res_ctx.pipe_ctx[i].prev_odm_pipe 2419 && context->res_ctx.pipe_ctx[i].stream) 2420 hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); 2421 2422 2423 /* Disconnect mpcc */ 2424 for (i = 0; i < dc->res_pool->pipe_count; i++) 2425 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable 2426 || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { 2427 struct hubbub *hubbub = dc->res_pool->hubbub; 2428 2429 /* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom 2430 * then we want to do the programming here (effectively it's being disabled). If we do 2431 * the programming later the DET won't be updated until the OTG for the phantom pipe is 2432 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with 2433 * DET allocation. 2434 */ 2435 if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable || 2436 (context->res_ctx.pipe_ctx[i].plane_state && 2437 dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == 2438 SUBVP_PHANTOM))) { 2439 if (hubbub->funcs->program_det_size) 2440 hubbub->funcs->program_det_size(hubbub, 2441 dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); 2442 if (dc->res_pool->hubbub->funcs->program_det_segments) 2443 dc->res_pool->hubbub->funcs->program_det_segments( 2444 hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); 2445 } 2446 hws->funcs.plane_atomic_disconnect(dc, dc->current_state, 2447 &dc->current_state->res_ctx.pipe_ctx[i]); 2448 DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); 2449 } 2450 2451 /* update ODM for blanked OTG master pipes */ 2452 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2453 pipe = &context->res_ctx.pipe_ctx[i]; 2454 if (resource_is_pipe_type(pipe, OTG_MASTER) && 2455 !resource_is_pipe_type(pipe, DPP_PIPE) && 2456 pipe->update_flags.bits.odm && 2457 hws->funcs.update_odm) 2458 hws->funcs.update_odm(dc, context, pipe); 2459 } 2460 2461 /* 2462 * Program all updated pipes, order matters for mpcc setup. Start with 2463 * top pipe and program all pipes that follow in order 2464 */ 2465 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2466 pipe = &context->res_ctx.pipe_ctx[i]; 2467 2468 if (pipe->plane_state && !pipe->top_pipe) { 2469 while (pipe) { 2470 if (hws->funcs.program_pipe) 2471 hws->funcs.program_pipe(dc, pipe, context); 2472 else { 2473 /* Don't program phantom pipes in the regular front end programming sequence. 2474 * There is an MPO transition case where a pipe being used by a video plane is 2475 * transitioned directly to be a phantom pipe when closing the MPO video. 2476 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place 2477 * right away) but the MPO still exists until the double buffered update of the 2478 * main pipe so we will get a frame of underflow if the phantom pipe is 2479 * programmed here. 2480 */ 2481 if (pipe->stream && 2482 dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) 2483 dcn401_program_pipe(dc, pipe, context); 2484 } 2485 2486 pipe = pipe->bottom_pipe; 2487 } 2488 } 2489 2490 /* Program secondary blending tree and writeback pipes */ 2491 pipe = &context->res_ctx.pipe_ctx[i]; 2492 if (!pipe->top_pipe && !pipe->prev_odm_pipe 2493 && pipe->stream && pipe->stream->num_wb_info > 0 2494 && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw) 2495 || pipe->stream->update_flags.raw) 2496 && hws->funcs.program_all_writeback_pipes_in_tree) 2497 hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); 2498 2499 /* Avoid underflow by check of pipe line read when adding 2nd plane. */ 2500 if (hws->wa.wait_hubpret_read_start_during_mpo_transition && 2501 !pipe->top_pipe && 2502 pipe->stream && 2503 pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && 2504 dc->current_state->stream_status[0].plane_count == 1 && 2505 context->stream_status[0].plane_count > 1) { 2506 pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); 2507 } 2508 } 2509 } 2510 2511 void dcn401_post_unlock_program_front_end( 2512 struct dc *dc, 2513 struct dc_state *context) 2514 { 2515 // Timeout for pipe enable 2516 unsigned int timeout_us = 100000; 2517 unsigned int polling_interval_us = 1; 2518 struct dce_hwseq *hwseq = dc->hwseq; 2519 int i; 2520 2521 DC_LOGGER_INIT(dc->ctx->logger); 2522 2523 for (i = 0; i < dc->res_pool->pipe_count; i++) 2524 if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) && 2525 !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD)) 2526 dc->hwss.post_unlock_reset_opp(dc, 2527 &dc->current_state->res_ctx.pipe_ctx[i]); 2528 2529 for (i = 0; i < dc->res_pool->pipe_count; i++) 2530 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) 2531 dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); 2532 2533 /* 2534 * If we are enabling a pipe, we need to wait for pending clear as this is a critical 2535 * part of the enable operation otherwise, DM may request an immediate flip which 2536 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which 2537 * is unsupported on DCN. 2538 */ 2539 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2540 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2541 // Don't check flip pending on phantom pipes 2542 if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable && 2543 dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) { 2544 struct hubp *hubp = pipe->plane_res.hubp; 2545 int j = 0; 2546 2547 for (j = 0; j < timeout_us / polling_interval_us 2548 && hubp->funcs->hubp_is_flip_pending(hubp); j++) 2549 udelay(polling_interval_us); 2550 } 2551 } 2552 2553 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2554 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2555 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2556 2557 /* When going from a smaller ODM slice count to larger, we must ensure double 2558 * buffer update completes before we return to ensure we don't reduce DISPCLK 2559 * before we've transitioned to 2:1 or 4:1 2560 */ 2561 if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) && 2562 resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) && 2563 dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) { 2564 int j = 0; 2565 struct timing_generator *tg = pipe->stream_res.tg; 2566 2567 if (tg->funcs->get_optc_double_buffer_pending) { 2568 for (j = 0; j < timeout_us / polling_interval_us 2569 && tg->funcs->get_optc_double_buffer_pending(tg); j++) 2570 udelay(polling_interval_us); 2571 } 2572 } 2573 } 2574 2575 if (dc->res_pool->hubbub->funcs->force_pstate_change_control) 2576 dc->res_pool->hubbub->funcs->force_pstate_change_control( 2577 dc->res_pool->hubbub, false, false); 2578 2579 2580 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2581 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2582 2583 if (pipe->plane_state && !pipe->top_pipe) { 2584 /* Program phantom pipe here to prevent a frame of underflow in the MPO transition 2585 * case (if a pipe being used for a video plane transitions to a phantom pipe, it 2586 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end 2587 * programming sequence). 2588 */ 2589 while (pipe) { 2590 if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 2591 /* When turning on the phantom pipe we want to run through the 2592 * entire enable sequence, so apply all the "enable" flags. 2593 */ 2594 if (dc->hwss.apply_update_flags_for_phantom) 2595 dc->hwss.apply_update_flags_for_phantom(pipe); 2596 if (dc->hwss.update_phantom_vp_position) 2597 dc->hwss.update_phantom_vp_position(dc, context, pipe); 2598 dcn401_program_pipe(dc, pipe, context); 2599 } 2600 pipe = pipe->bottom_pipe; 2601 } 2602 } 2603 } 2604 2605 if (!hwseq) 2606 return; 2607 2608 /* P-State support transitions: 2609 * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe 2610 * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally) 2611 * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe 2612 * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe 2613 * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes 2614 */ 2615 if (hwseq->funcs.update_force_pstate) 2616 dc->hwseq->funcs.update_force_pstate(dc, context); 2617 2618 /* Only program the MALL registers after all the main and phantom pipes 2619 * are done programming. 2620 */ 2621 if (hwseq->funcs.program_mall_pipe_config) 2622 hwseq->funcs.program_mall_pipe_config(dc, context); 2623 2624 /* WA to apply WM setting*/ 2625 if (hwseq->wa.DEGVIDCN21) 2626 dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); 2627 2628 2629 /* WA for stutter underflow during MPO transitions when adding 2nd plane */ 2630 if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) { 2631 2632 if (dc->current_state->stream_status[0].plane_count == 1 && 2633 context->stream_status[0].plane_count > 1) { 2634 2635 struct timing_generator *tg = dc->res_pool->timing_generators[0]; 2636 2637 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false); 2638 2639 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true; 2640 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = 2641 tg->funcs->get_frame_count(tg); 2642 } 2643 } 2644 } 2645 2646 bool dcn401_update_bandwidth( 2647 struct dc *dc, 2648 struct dc_state *context) 2649 { 2650 int i; 2651 struct dce_hwseq *hws = dc->hwseq; 2652 2653 /* recalculate DML parameters */ 2654 if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) 2655 return false; 2656 2657 /* apply updated bandwidth parameters */ 2658 dc->hwss.prepare_bandwidth(dc, context); 2659 2660 /* update hubp configs for all pipes */ 2661 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2662 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2663 2664 if (pipe_ctx->plane_state == NULL) 2665 continue; 2666 2667 if (pipe_ctx->top_pipe == NULL) { 2668 bool blank = !is_pipe_tree_visible(pipe_ctx); 2669 2670 pipe_ctx->stream_res.tg->funcs->program_global_sync( 2671 pipe_ctx->stream_res.tg, 2672 dcn401_calculate_vready_offset_for_group(pipe_ctx), 2673 (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines, 2674 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels, 2675 (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels, 2676 (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines); 2677 2678 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 2679 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false); 2680 2681 if (pipe_ctx->prev_odm_pipe == NULL) 2682 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); 2683 2684 if (hws->funcs.setup_vupdate_interrupt) 2685 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); 2686 } 2687 2688 if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2) 2689 pipe_ctx->plane_res.hubp->funcs->hubp_setup2( 2690 pipe_ctx->plane_res.hubp, 2691 &pipe_ctx->hubp_regs, 2692 &pipe_ctx->global_sync, 2693 &pipe_ctx->stream->timing); 2694 } 2695 2696 return true; 2697 } 2698 2699 void dcn401_detect_pipe_changes(struct dc_state *old_state, 2700 struct dc_state *new_state, 2701 struct pipe_ctx *old_pipe, 2702 struct pipe_ctx *new_pipe) 2703 { 2704 bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM; 2705 bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM; 2706 2707 unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels; 2708 unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels; 2709 unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines; 2710 unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines; 2711 unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels; 2712 unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels; 2713 unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels; 2714 unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels; 2715 2716 new_pipe->update_flags.raw = 0; 2717 2718 /* If non-phantom pipe is being transitioned to a phantom pipe, 2719 * set disable and return immediately. This is because the pipe 2720 * that was previously in use must be fully disabled before we 2721 * can "enable" it as a phantom pipe (since the OTG will certainly 2722 * be different). The post_unlock sequence will set the correct 2723 * update flags to enable the phantom pipe. 2724 */ 2725 if (old_pipe->plane_state && !old_is_phantom && 2726 new_pipe->plane_state && new_is_phantom) { 2727 new_pipe->update_flags.bits.disable = 1; 2728 return; 2729 } 2730 2731 if (resource_is_pipe_type(new_pipe, OTG_MASTER) && 2732 resource_is_odm_topology_changed(new_pipe, old_pipe)) 2733 /* Detect odm changes */ 2734 new_pipe->update_flags.bits.odm = 1; 2735 2736 /* Exit on unchanged, unused pipe */ 2737 if (!old_pipe->plane_state && !new_pipe->plane_state) 2738 return; 2739 /* Detect pipe enable/disable */ 2740 if (!old_pipe->plane_state && new_pipe->plane_state) { 2741 new_pipe->update_flags.bits.enable = 1; 2742 new_pipe->update_flags.bits.mpcc = 1; 2743 new_pipe->update_flags.bits.dppclk = 1; 2744 new_pipe->update_flags.bits.hubp_interdependent = 1; 2745 new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; 2746 new_pipe->update_flags.bits.unbounded_req = 1; 2747 new_pipe->update_flags.bits.gamut_remap = 1; 2748 new_pipe->update_flags.bits.scaler = 1; 2749 new_pipe->update_flags.bits.viewport = 1; 2750 new_pipe->update_flags.bits.det_size = 1; 2751 if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE && 2752 new_pipe->stream_res.test_pattern_params.width != 0 && 2753 new_pipe->stream_res.test_pattern_params.height != 0) 2754 new_pipe->update_flags.bits.test_pattern_changed = 1; 2755 if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { 2756 new_pipe->update_flags.bits.odm = 1; 2757 new_pipe->update_flags.bits.global_sync = 1; 2758 } 2759 return; 2760 } 2761 2762 /* For SubVP we need to unconditionally enable because any phantom pipes are 2763 * always removed then newly added for every full updates whenever SubVP is in use. 2764 * The remove-add sequence of the phantom pipe always results in the pipe 2765 * being blanked in enable_stream_timing (DPG). 2766 */ 2767 if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM) 2768 new_pipe->update_flags.bits.enable = 1; 2769 2770 /* Phantom pipes are effectively disabled, if the pipe was previously phantom 2771 * we have to enable 2772 */ 2773 if (old_pipe->plane_state && old_is_phantom && 2774 new_pipe->plane_state && !new_is_phantom) 2775 new_pipe->update_flags.bits.enable = 1; 2776 2777 if (old_pipe->plane_state && !new_pipe->plane_state) { 2778 new_pipe->update_flags.bits.disable = 1; 2779 return; 2780 } 2781 2782 /* Detect plane change */ 2783 if (old_pipe->plane_state != new_pipe->plane_state) 2784 new_pipe->update_flags.bits.plane_changed = true; 2785 2786 /* Detect top pipe only changes */ 2787 if (resource_is_pipe_type(new_pipe, OTG_MASTER)) { 2788 /* Detect global sync changes */ 2789 if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels) 2790 || (old_pipe_vstartup_lines != new_pipe_vstartup_lines) 2791 || (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels) 2792 || (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels)) 2793 new_pipe->update_flags.bits.global_sync = 1; 2794 } 2795 2796 if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb) 2797 new_pipe->update_flags.bits.det_size = 1; 2798 2799 /* 2800 * Detect opp / tg change, only set on change, not on enable 2801 * Assume mpcc inst = pipe index, if not this code needs to be updated 2802 * since mpcc is what is affected by these. In fact all of our sequence 2803 * makes this assumption at the moment with how hubp reset is matched to 2804 * same index mpcc reset. 2805 */ 2806 if (old_pipe->stream_res.opp != new_pipe->stream_res.opp) 2807 new_pipe->update_flags.bits.opp_changed = 1; 2808 if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) 2809 new_pipe->update_flags.bits.tg_changed = 1; 2810 2811 /* 2812 * Detect mpcc blending changes, only dpp inst and opp matter here, 2813 * mpccs getting removed/inserted update connected ones during their own 2814 * programming 2815 */ 2816 if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp 2817 || old_pipe->stream_res.opp != new_pipe->stream_res.opp) 2818 new_pipe->update_flags.bits.mpcc = 1; 2819 2820 /* Detect dppclk change */ 2821 if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz) 2822 new_pipe->update_flags.bits.dppclk = 1; 2823 2824 /* Check for scl update */ 2825 if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data))) 2826 new_pipe->update_flags.bits.scaler = 1; 2827 /* Check for vp update */ 2828 if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect)) 2829 || memcmp(&old_pipe->plane_res.scl_data.viewport_c, 2830 &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect))) 2831 new_pipe->update_flags.bits.viewport = 1; 2832 2833 /* Detect dlg/ttu/rq updates */ 2834 { 2835 struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs; 2836 struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs; 2837 struct dml2_display_rq_regs old_rq_regs = old_pipe->hubp_regs.rq_regs; 2838 struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs; 2839 struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs; 2840 struct dml2_display_rq_regs *new_rq_regs = &new_pipe->hubp_regs.rq_regs; 2841 2842 /* Detect pipe interdependent updates */ 2843 if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch) 2844 || (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch) 2845 || (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c) 2846 || (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank) 2847 || (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank) 2848 || (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip) 2849 || (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip) 2850 || (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l) 2851 || (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c) 2852 || (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l) 2853 || (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l) 2854 || (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c) 2855 || (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l) 2856 || (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c) 2857 || (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 != 2858 new_ttu_regs->refcyc_per_req_delivery_pre_cur0) 2859 || (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank) 2860 || (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) { 2861 old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch; 2862 old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch; 2863 old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c; 2864 old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank; 2865 old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank; 2866 old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip; 2867 old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip; 2868 old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l; 2869 old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c; 2870 old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l; 2871 old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l; 2872 old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c; 2873 old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l; 2874 old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c; 2875 old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0; 2876 old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank; 2877 old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip; 2878 new_pipe->update_flags.bits.hubp_interdependent = 1; 2879 } 2880 /* Detect any other updates to ttu/rq/dlg */ 2881 if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) || 2882 memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) || 2883 memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs))) 2884 new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; 2885 } 2886 2887 if (old_pipe->unbounded_req != new_pipe->unbounded_req) 2888 new_pipe->update_flags.bits.unbounded_req = 1; 2889 2890 if (memcmp(&old_pipe->stream_res.test_pattern_params, 2891 &new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) { 2892 new_pipe->update_flags.bits.test_pattern_changed = 1; 2893 } 2894 } 2895 2896 void dcn401_plane_atomic_power_down(struct dc *dc, 2897 struct dpp *dpp, 2898 struct hubp *hubp) 2899 { 2900 struct dce_hwseq *hws = dc->hwseq; 2901 uint32_t org_ip_request_cntl = 0; 2902 2903 DC_LOGGER_INIT(dc->ctx->logger); 2904 2905 REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); 2906 if (org_ip_request_cntl == 0) 2907 REG_SET(DC_IP_REQUEST_CNTL, 0, 2908 IP_REQUEST_EN, 1); 2909 2910 if (hws->funcs.dpp_pg_control) 2911 hws->funcs.dpp_pg_control(hws, dpp->inst, false); 2912 2913 if (hws->funcs.hubp_pg_control) 2914 hws->funcs.hubp_pg_control(hws, hubp->inst, false); 2915 2916 hubp->funcs->hubp_reset(hubp); 2917 dpp->funcs->dpp_reset(dpp); 2918 2919 if (org_ip_request_cntl == 0) 2920 REG_SET(DC_IP_REQUEST_CNTL, 0, 2921 IP_REQUEST_EN, 0); 2922 2923 DC_LOG_DEBUG( 2924 "Power gated front end %d\n", hubp->inst); 2925 2926 if (hws->funcs.dpp_root_clock_control) 2927 hws->funcs.dpp_root_clock_control(hws, dpp->inst, false); 2928 } 2929