Lines matching refs: dm (references to the dm display-manager member, struct amdgpu_display_manager, in amdgpu_dm.c)
175 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
234 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
236 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
254 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
297 struct dc *dc = adev->dm.dc;
589 adev->dm.freesync_module,
594 adev->dm.dc,
644 dc_stream_fc_disable_writeback(adev->dm.dc,
682 mod_freesync_handle_v_update(adev->dm.freesync_module,
686 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
749 if (adev->dm.dmub_notify)
750 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
752 complete(&adev->dm.dmub_aux_transfer_done);
766 if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
771 struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
805 if (notify->link_index > adev->dm.dc->link_count) {
817 link = adev->dm.dc->links[link_index];
818 dev = adev->dm.ddev;
884 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
885 adev->dm.dmub_callback[type] = callback;
886 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
904 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
905 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
951 struct amdgpu_display_manager *dm = &adev->dm;
957 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
973 if (dc_enable_dmub_notifications(adev->dm.dc) &&
977 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
978 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
982 if (!dm->dmub_callback[notify.type]) {
987 if (dm->dmub_thread_offload[notify.type] == true) {
1002 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
1004 dm->dmub_callback[notify.type](adev, &notify);
1029 struct dm_compressor_info *compressor = &adev->dm.compressor;
1034 if (adev->dm.dc->fbc_compressor == NULL)
1057 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
1078 mutex_lock(&adev->dm.audio_lock);
1100 mutex_unlock(&adev->dm.audio_lock);
1120 adev->dm.audio_component = acomp;
1133 adev->dm.audio_component = NULL;
1150 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
1160 adev->dm.dc->res_pool->audios[i]->inst;
1168 adev->dm.audio_registered = true;
1181 if (adev->dm.audio_registered) {
1183 adev->dm.audio_registered = false;
1193 struct drm_audio_component *acomp = adev->dm.audio_component;
1206 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1207 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1208 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1209 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1210 struct abm *abm = adev->dm.dc->res_pool->abm;
1211 struct dc_context *ctx = adev->dm.dc->ctx;
1323 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1357 if (!adev->dm.dc->ctx->dmub_srv)
1358 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1359 if (!adev->dm.dc->ctx->dmub_srv) {
1365 adev->dm.dmcub_fw_version);
1375 if (adev->dm.dmcub_fw_version &&
1376 adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
1377 adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
1378 adev->dm.dc->debug.sanity_checks = true;
1381 if (adev->dm.dmcub_fw_version &&
1382 adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
1383 adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
1384 adev->dm.dc->debug.sanity_checks = true;
1395 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1557 mutex_lock(&adev->dm.dc_lock);
1601 mutex_unlock(&adev->dm.dc_lock);
1610 struct dc *dc = adev->dm.dc;
1703 /* add da to list in dm */
1704 list_add(&da->list, &adev->dm.da_list);
1718 list_for_each_entry(da, &adev->dm.da_list, list) {
1744 cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);
1752 cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);
1840 adev->dm.ddev = adev_to_drm(adev);
1841 adev->dm.adev = adev;
1847 mutex_init(&adev->dm.dpia_aux_lock);
1848 mutex_init(&adev->dm.dc_lock);
1849 mutex_init(&adev->dm.audio_lock);
1870 init_data.cgs_device = adev->dm.cgs_device;
1876 switch (adev->dm.dmcub_fw_version) {
1937 drm_dbg(adev->dm.ddev, "Seamless boot requested\n");
1963 retrieve_dmi_info(&adev->dm);
1964 if (adev->dm.edp0_on_dp1_quirk)
1967 if (adev->dm.bb_from_dmub)
1968 init_data.bb_from_dmub = adev->dm.bb_from_dmub;
1973 adev->dm.dc = dc_create(&init_data);
1975 if (adev->dm.dc) {
1977 dce_version_to_string(adev->dm.dc->ctx->dce_version));
1984 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1985 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1989 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1991 adev->dm.dc->debug.disable_stutter = true;
1994 adev->dm.dc->debug.disable_stutter = true;
1997 adev->dm.dc->debug.disable_dsc = true;
2000 adev->dm.dc->debug.disable_clock_gate = true;
2003 adev->dm.dc->debug.force_subvp_mclk_switch = true;
2006 adev->dm.dc->debug.force_disable_subvp = true;
2007 adev->dm.dc->debug.fams2_config.bits.enable = false;
2011 adev->dm.dc->debug.using_dml2 = true;
2012 adev->dm.dc->debug.using_dml21 = true;
2016 adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
2019 adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
2022 adev->dm.dc->debug.skip_detection_link_training = true;
2024 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
2027 adev->dm.dc->debug.ignore_cable_id = true;
2029 if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
2038 dc_hardware_init(adev->dm.dc);
2040 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
2041 if (!adev->dm.hpd_rx_offload_wq) {
2052 dc_setup_system_context(adev->dm.dc, &pa_config);
2055 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
2056 if (!adev->dm.freesync_module) {
2061 adev->dm.freesync_module);
2065 if (adev->dm.dc->caps.max_links > 0) {
2066 adev->dm.vblank_control_workqueue =
2068 if (!adev->dm.vblank_control_workqueue)
2072 if (adev->dm.dc->caps.ips_support &&
2073 adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
2074 adev->dm.idle_workqueue = idle_create_workqueue(adev);
2076 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
2077 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
2079 if (!adev->dm.hdcp_workqueue)
2082 drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
2084 dc_init_callbacks(adev->dm.dc, &init_params);
2086 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2087 init_completion(&adev->dm.dmub_aux_transfer_done);
2088 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
2089 if (!adev->dm.dmub_notify) {
2090 drm_info(adev_to_drm(adev), "failed to allocate adev->dm.dmub_notify");
2094 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
2095 if (!adev->dm.delayed_hpd_wq) {
2107 for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
2108 init_completion(&adev->dm.fused_io[i].replied);
2121 dc_enable_dmub_outbox(adev->dm.dc);
2125 dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
2140 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
2141 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
2143 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
2151 if (!adev->dm.secure_display_ctx.crtc_ctx)
2155 adev->dm.secure_display_ctx.support_mul_roi = true;
2181 if (adev->dm.vblank_control_workqueue) {
2182 destroy_workqueue(adev->dm.vblank_control_workqueue);
2183 adev->dm.vblank_control_workqueue = NULL;
2186 if (adev->dm.idle_workqueue) {
2187 if (adev->dm.idle_workqueue->running) {
2188 adev->dm.idle_workqueue->enable = false;
2189 flush_work(&adev->dm.idle_workqueue->work);
2192 kfree(adev->dm.idle_workqueue);
2193 adev->dm.idle_workqueue = NULL;
2196 amdgpu_dm_destroy_drm_device(&adev->dm);
2199 if (adev->dm.secure_display_ctx.crtc_ctx) {
2201 if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
2202 flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
2203 flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
2206 kfree(adev->dm.secure_display_ctx.crtc_ctx);
2207 adev->dm.secure_display_ctx.crtc_ctx = NULL;
2210 if (adev->dm.hdcp_workqueue) {
2211 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
2212 adev->dm.hdcp_workqueue = NULL;
2215 if (adev->dm.dc) {
2216 dc_deinit_callbacks(adev->dm.dc);
2217 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
2218 if (dc_enable_dmub_notifications(adev->dm.dc)) {
2219 kfree(adev->dm.dmub_notify);
2220 adev->dm.dmub_notify = NULL;
2221 destroy_workqueue(adev->dm.delayed_hpd_wq);
2222 adev->dm.delayed_hpd_wq = NULL;
2226 if (adev->dm.dmub_bo)
2227 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
2228 &adev->dm.dmub_bo_gpu_addr,
2229 &adev->dm.dmub_bo_cpu_addr);
2231 if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
2232 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
2233 if (adev->dm.hpd_rx_offload_wq[i].wq) {
2234 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
2235 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
2239 kfree(adev->dm.hpd_rx_offload_wq);
2240 adev->dm.hpd_rx_offload_wq = NULL;
2244 if (adev->dm.dc)
2245 dc_destroy(&adev->dm.dc);
2252 if (adev->dm.cgs_device) {
2253 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
2254 adev->dm.cgs_device = NULL;
2256 if (adev->dm.freesync_module) {
2257 mod_freesync_destroy(adev->dm.freesync_module);
2258 adev->dm.freesync_module = NULL;
2261 mutex_destroy(&adev->dm.audio_lock);
2262 mutex_destroy(&adev->dm.dc_lock);
2263 mutex_destroy(&adev->dm.dpia_aux_lock);
2337 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
2341 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED,
2345 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
2346 adev->dm.fw_dmcu = NULL;
2352 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2356 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
2358 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
2363 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
2367 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
2378 return dm_read_reg(adev->dm.dc->ctx, address);
2386 return dm_write_reg(adev->dm.dc->ctx, address, value);
2465 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2466 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2472 adev->dm.dmub_fw;
2477 adev->dm.dmcub_fw_version);
2481 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2482 dmub_srv = adev->dm.dmub_srv;
2510 adev->dm.dmub_fw->data +
2514 adev->dm.dmub_fw->data +
2534 &adev->dm.dmub_bo,
2535 &adev->dm.dmub_bo_gpu_addr,
2536 &adev->dm.dmub_bo_cpu_addr);
2542 memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
2543 memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
2547 adev->dm.dmub_fb_info =
2548 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2549 fb_info = adev->dm.dmub_fb_info;
2563 adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev);
2573 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
2575 if (!adev->dm.cgs_device) {
2580 /* Moved from dm init since we need to use allocations for storing bounding box data */
2581 INIT_LIST_HEAD(&adev->dm.da_list);
2595 list_for_each_entry(da, &adev->dm.da_list, list) {
2596 if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) {
2600 adev->dm.bb_from_dmub = NULL;
2606 kfree(adev->dm.dmub_fb_info);
2607 adev->dm.dmub_fb_info = NULL;
2609 if (adev->dm.dmub_srv) {
2610 dmub_srv_destroy(adev->dm.dmub_srv);
2611 kfree(adev->dm.dmub_srv);
2612 adev->dm.dmub_srv = NULL;
2615 amdgpu_ucode_release(&adev->dm.dmub_fw);
2616 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2666 dmcu = adev->dm.dc->res_pool->dmcu;
2689 } else if (adev->dm.dc->ctx->dmub_srv) {
2693 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
2695 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2891 struct amdgpu_display_manager *dm = &adev->dm;
2896 oem_ddc_service = dc_get_oem_i2c_device(adev->dm.dc);
2910 dm->oem_i2c = oem_i2c;
2966 kfree(adev->dm.oem_i2c);
2990 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3008 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
3056 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
3060 if (dm->hpd_rx_offload_wq) {
3061 for (i = 0; i < dm->dc->caps.max_links; i++)
3062 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
3070 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
3071 if (IS_ERR(adev->dm.cached_state)) {
3072 r = PTR_ERR(adev->dm.cached_state);
3073 adev->dm.cached_state = NULL;
3076 return adev->dm.cached_state ? 0 : r;
3081 struct amdgpu_display_manager *dm = &adev->dm;
3091 if (!dm->cached_state)
3095 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
3106 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
3116 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
3125 drm_atomic_helper_resume(ddev, dm->cached_state);
3127 dm->cached_state = NULL;
3144 WARN_ON(adev->dm.cached_state);
3152 struct amdgpu_display_manager *dm = &adev->dm;
3157 mutex_lock(&dm->dc_lock);
3159 dc_allow_idle_optimizations(adev->dm.dc, false);
3161 dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
3163 if (dm->cached_dc_state)
3164 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
3166 res = amdgpu_dm_commit_zero_streams(dm->dc);
3174 hpd_rx_irq_work_suspend(dm);
3179 if (!adev->dm.cached_state) {
3192 hpd_rx_irq_work_suspend(dm);
3194 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
3196 if (dm->dc->caps.ips_support && adev->in_s0ix)
3197 dc_allow_idle_optimizations(dm->dc, true);
3199 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
3307 struct amdgpu_display_manager *dm)
3321 drm_err(dm->ddev, "Failed to allocate update bundle\n");
3335 update_planes_and_stream_adapter(dm->dc,
3366 struct amdgpu_display_manager *dm = &adev->dm;
3370 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
3376 if (dm->dc->caps.ips_support) {
3377 dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
3381 dc_state = dm->cached_dc_state;
3384 * The dc->current_state is backed up into dm->cached_dc_state
3398 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
3406 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
3407 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
3409 dc_resume(dm->dc);
3421 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
3423 dc_enable_dmub_outbox(adev->dm.dc);
3428 dc_exit_ips_for_hw_access(dm->dc);
3429 WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
3431 dm_gpureset_commit_state(dm->cached_dc_state, dm);
3433 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
3435 dc_state_release(dm->cached_dc_state);
3436 dm->cached_dc_state = NULL;
3440 mutex_unlock(&dm->dc_lock);
3443 for (i = 0; i < dm->num_of_edps; i++) {
3444 if (dm->backlight_dev[i])
3445 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
3452 dm_state->context = dc_state_create(dm->dc, NULL);
3459 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
3461 dc_enable_dmub_outbox(adev->dm.dc);
3465 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
3466 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
3469 dc_resume(dm->dc);
3509 guard(mutex)(&dm->dc_lock);
3510 dc_exit_ips_for_hw_access(dm->dc);
3566 .name = "dm",
3628 caps = &adev->dm.backlight_caps[aconnector->bl_idx];
3816 if (adev->dm.disable_hpd_irq)
3825 if (adev->dm.hdcp_workqueue) {
3826 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3847 scoped_guard(mutex, &adev->dm.dc_lock) {
3908 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3913 if (adev->dm.disable_hpd_irq)
3998 mutex_lock(&adev->dm.dc_lock);
4001 mutex_unlock(&adev->dm.dc_lock);
4018 if (adev->dm.hdcp_workqueue)
4019 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
4039 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
4109 struct dc *dc = adev->dm.dc;
4150 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4180 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
4207 struct dc *dc = adev->dm.dc;
4251 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4280 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
4310 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
4336 struct dc *dc = adev->dm.dc;
4389 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
4421 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
4460 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
4491 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
4516 struct dc *dc = adev->dm.dc;
4537 c_irq_params = &adev->dm.dmub_outbox_params[0];
4561 struct amdgpu_display_manager *dm = &adev->dm;
4567 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
4581 struct amdgpu_display_manager *dm = &adev->dm;
4587 if (obj->funcs == dm->atomic_obj.funcs)
4660 state->context = dc_state_create_current_copy(adev->dm.dc);
4667 &adev->dm.atomic_obj,
4701 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
4704 struct amdgpu_dm_backlight_caps *caps = &dm->backlight_caps[bl_idx];
4842 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4851 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4852 caps = &dm->backlight_caps[bl_idx];
4854 dm->brightness[bl_idx] = user_brightness;
4857 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4858 brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
4859 link = (struct dc_link *)dm->backlight_link[bl_idx];
4862 mutex_lock(&dm->dc_lock);
4863 if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
4864 dc_allow_idle_optimizations(dm->dc, false);
4892 if (dm->dc->caps.ips_support && reallow_idle)
4893 dc_allow_idle_optimizations(dm->dc, true);
4895 mutex_unlock(&dm->dc_lock);
4898 dm->actual_brightness[bl_idx] = user_brightness;
4903 struct amdgpu_display_manager *dm = bl_get_data(bd);
4906 for (i = 0; i < dm->num_of_edps; i++) {
4907 if (bd == dm->backlight_dev[i])
4912 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4917 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4922 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4924 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4925 caps = dm->backlight_caps[bl_idx];
4933 return dm->brightness[bl_idx];
4940 return dm->brightness[bl_idx];
4947 struct amdgpu_display_manager *dm = bl_get_data(bd);
4950 for (i = 0; i < dm->num_of_edps; i++) {
4951 if (bd == dm->backlight_dev[i])
4956 return amdgpu_dm_backlight_get_level(dm, i);
4969 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
4985 caps = &dm->backlight_caps[aconnector->bl_idx];
5005 dm->backlight_dev[aconnector->bl_idx] =
5006 backlight_device_register(bl_name, aconnector->base.kdev, dm,
5008 dm->brightness[aconnector->bl_idx] = props.brightness;
5010 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
5012 dm->backlight_dev[aconnector->bl_idx] = NULL;
5017 static int initialize_plane(struct amdgpu_display_manager *dm,
5028 drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
5040 if (plane_id >= dm->dc->caps.max_streams)
5043 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
5046 drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
5058 static void setup_backlight_device(struct amdgpu_display_manager *dm,
5062 int bl_idx = dm->num_of_edps;
5068 if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
5069 drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
5075 amdgpu_dm_update_backlight_caps(dm, bl_idx);
5076 dm->backlight_link[bl_idx] = link;
5077 dm->num_of_edps++;
5094 struct amdgpu_display_manager *dm = &adev->dm;
5105 int max_overlay = dm->dc->caps.max_slave_planes;
5107 dm->display_indexes_num = dm->dc->caps.max_streams;
5109 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
5113 link_cnt = dm->dc->caps.max_links;
5114 if (amdgpu_dm_mode_config_init(dm->adev)) {
5120 primary_planes = dm->dc->caps.max_streams;
5131 plane = &dm->dc->caps.planes[i];
5133 if (initialize_plane(dm, mode_info, i,
5149 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
5150 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
5165 if (initialize_plane(dm, NULL, primary_planes + i,
5172 for (i = 0; i < dm->dc->caps.max_streams; i++)
5173 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
5193 if (register_outbox_irq_handlers(dm->adev)) {
5254 link = dc_get_link_at_index(dm->dc, i);
5264 if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
5284 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
5289 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
5294 if (dm->hpd_rx_offload_wq)
5295 dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
5307 mutex_lock(&dm->dc_lock);
5308 dc_exit_ips_for_hw_access(dm->dc);
5310 mutex_unlock(&dm->dc_lock);
5314 setup_backlight_device(dm, aconnector);
5342 if (dce60_register_irq_handlers(dm->adev)) {
5364 if (dce110_register_irq_handlers(dm->adev)) {
5392 if (dcn10_register_irq_handlers(dm->adev)) {
5413 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
5415 if (dm->atomic_obj.state)
5416 drm_atomic_private_obj_fini(&dm->atomic_obj);
5544 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED,
7267 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
7270 caps = &dm->backlight_caps[amdgpu_dm_connector->bl_idx];
7293 struct amdgpu_display_manager *dm = &adev->dm;
7303 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
7304 dm->backlight_dev[aconnector->bl_idx] = NULL;
7628 dc_result = dc_validate_stream(adev->dm.dc, stream);
7637 dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
8397 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8403 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8453 dm->ddev->mode_config.scaling_mode_property,
8498 if (adev->dm.hdcp_workqueue)
8608 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8615 struct dc *dc = dm->dc;
8625 drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n");
8633 drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
8640 dm->ddev,
8647 drm_err(adev_to_drm(dm->adev), "connector_init failed\n");
8657 dm,
8672 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8967 struct amdgpu_display_manager *dm,
8975 struct amdgpu_device *adev = dm->adev;
8998 dm->freesync_module,
9006 mod_freesync_handle_v_update(dm->freesync_module,
9010 dc_stream_adjust_vmin_vmax(dm->dc,
9033 dm->freesync_module,
9062 struct amdgpu_display_manager *dm,
9068 struct amdgpu_device *adev = dm->adev;
9108 mod_freesync_build_vrr_params(dm->freesync_module,
9223 adev->dm.dc->caps.color.dpp.gamma_corr)
9298 struct amdgpu_display_manager *dm,
9375 if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) != 0)
9404 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
9420 dm->adev, new_plane_state,
9454 mutex_lock(&dm->dc_lock);
9459 mutex_unlock(&dm->dc_lock);
9496 dm,
9541 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9593 if (dm->vblank_control_workqueue)
9594 flush_workqueue(dm->vblank_control_workqueue);
9623 mutex_lock(&dm->dc_lock);
9630 mutex_unlock(&dm->dc_lock);
9640 dm->dc, acrtc_state->stream,
9644 mutex_lock(&dm->dc_lock);
9645 update_planes_and_stream_adapter(dm->dc,
9671 mutex_unlock(&dm->dc_lock);
9680 (!updated_planes_and_streams || amdgpu_ip_version(dm->adev, DCE_HWIP, 0) == 0) &&
9725 mutex_lock(&adev->dm.audio_lock);
9728 mutex_unlock(&adev->dm.audio_lock);
9760 mutex_lock(&adev->dm.audio_lock);
9763 mutex_unlock(&adev->dm.audio_lock);
9783 static void dm_clear_writeback(struct amdgpu_display_manager *dm,
9786 dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
9794 struct amdgpu_display_manager *dm = &adev->dm;
9828 dm_clear_writeback(dm, dm_old_crtc_state);
9870 mutex_lock(&dm->dc_lock);
9871 dc_exit_ips_for_hw_access(dm->dc);
9873 mutex_unlock(&dm->dc_lock);
9938 if (dm->vblank_control_workqueue)
9939 flush_workqueue(dm->vblank_control_workqueue);
9941 amdgpu_dm_replay_disable_all(dm);
9942 amdgpu_dm_psr_disable_all(dm);
9946 mutex_lock(&dm->dc_lock);
9947 dc_exit_ips_for_hw_access(dm->dc);
9948 WARN_ON(!dc_commit_streams(dm->dc, &params));
9951 if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev))
9952 dc_allow_idle_optimizations(dm->dc, true);
9953 mutex_unlock(&dm->dc_lock);
9978 * It leaves dm->actual_brightness out of sync with the current panel
9979 * brightness level (dm->brightness holds the correct level),
9980 * so we set the backlight level from dm->brightness after the mode set.
9983 for (i = 0; i < dm->num_of_edps; i++) {
9984 if (dm->backlight_dev[i])
9985 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9990 static void dm_set_writeback(struct amdgpu_display_manager *dm,
9996 struct amdgpu_device *adev = dm->adev;
10024 if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
10025 pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
10082 dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
10101 struct amdgpu_display_manager *dm = &adev->dm;
10135 if (!adev->dm.hdcp_workqueue)
10186 if (!adev->dm.hdcp_workqueue)
10201 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
10208 old_con_state, connector, adev->dm.hdcp_workqueue)) {
10225 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
10243 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10340 mutex_lock(&dm->dc_lock);
10341 dc_exit_ips_for_hw_access(dm->dc);
10342 dc_update_planes_and_stream(dm->dc,
10347 mutex_unlock(&dm->dc_lock);
10370 update_stream_irq_parameters(dm, dm_new_crtc_state);
10432 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
10456 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
10464 for (i = 0; i < dm->num_of_edps; i++) {
10465 if (dm->backlight_dev[i] &&
10466 (dm->actual_brightness[i] != dm->brightness[i]))
10467 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10761 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10772 struct amdgpu_device *adev = dm->adev;
10837 dm->force_timing_sync;
10928 dm->dc,
10971 dm->dc,
11818 struct dc *dc = adev->dm.dc;
12000 ret = dm_update_crtc_state(&adev->dm, state, crtc,
12013 ret = dm_update_crtc_state(&adev->dm, state, crtc,
12247 if (obj->funcs == adev->dm.atomic_obj.funcs) {
12315 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
12343 res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
12345 drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
12353 drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
12365 drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
12372 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
12384 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
12391 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
12405 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
12413 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
12422 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
12436 mutex_lock(&adev->dm.dc_lock);
12437 if (adev->dm.dmub_srv)
12438 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
12440 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
12441 mutex_unlock(&adev->dm.dc_lock);
12603 if (!adev->dm.freesync_module)
12683 struct dc *dc = adev->dm.dc;
12686 mutex_lock(&adev->dm.dc_lock);
12691 adev->dm.force_timing_sync;
12696 mutex_unlock(&adev->dm.dc_lock);
12756 struct dmub_notification *p_notify = adev->dm.dmub_notify;
12759 mutex_lock(&adev->dm.dpia_aux_lock);
12765 if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
12785 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
12786 if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
12788 payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
12799 reinit_completion(&adev->dm.dmub_aux_transfer_done);
12800 mutex_unlock(&adev->dm.dpia_aux_lock);
12829 if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
12832 struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
12867 struct amdgpu_display_manager *dm = &dev->dm;
12869 mutex_lock(&dm->dpia_aux_lock);
12873 mutex_unlock(&dm->dpia_aux_lock);
12887 mutex_lock(&adev->dm.dpia_aux_lock);
12889 link_index, payload, adev->dm.dmub_notify);
12891 if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
12893 *operation_result = adev->dm.dmub_notify->sc_status;
12901 reinit_completion(&adev->dm.dmub_aux_transfer_done);
12902 mutex_unlock(&adev->dm.dpia_aux_lock);
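
One pattern recurs throughout the matches above (for example at 5307-5310, 9870-9873, and 9946-9948): every DC call that programs hardware is serialized behind dm->dc_lock, and dc_exit_ips_for_hw_access() is called first so DC has left its idle power states before registers are touched. A minimal sketch of that discipline follows, assuming the amdgpu_dm kernel context for the types and helpers named in the matches; the wrapper function itself is hypothetical, not code from the file:

	/* Hypothetical helper illustrating the dc_lock pattern in the matches above. */
	static void dm_commit_streams_locked(struct amdgpu_display_manager *dm,
					     struct dc_commit_streams_params *params)
	{
		/* Hardware-programming DC entry points run under dm->dc_lock. */
		mutex_lock(&dm->dc_lock);

		/* Bring DC out of IPS (idle power states) before register access. */
		dc_exit_ips_for_hw_access(dm->dc);

		/* Same WARN_ON idiom as the match at 9948 above. */
		WARN_ON(!dc_commit_streams(dm->dc, params));

		mutex_unlock(&dm->dc_lock);
	}

The guard(mutex)(&dm->dc_lock) form at 3509 and scoped_guard(mutex, &adev->dm.dc_lock) at 3847 are the cleanup-attribute equivalents of the same locking discipline.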