/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <acpi/video.h>

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>

#include <drm/drm_atomic.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"
#include "dpcd_defs.h"
#include "dc/inc/core_types.h"

#include "dm_helpers.h"
#include "ddc_service_types.h"
#include "clk_mgr.h"

static u32 edid_extract_panel_id(struct edid *edid)
{
	return (u32)edid->mfg_id[0] << 24 |
	       (u32)edid->mfg_id[1] << 16 |
	       (u32)EDID_PRODUCT_ID(edid);
}

static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
{
	uint32_t panel_id = edid_extract_panel_id(edid);

	switch (panel_id) {
	/* Workaround for some monitors that do not work well with FAMS */
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
	case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
		DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.disable_fams = true;
		break;
	/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
	case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
	case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
	case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
	case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
	case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
		DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.remove_sink_ext_caps = true;
		break;
	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
		DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
		edid_caps->panel_patch.disable_colorimetry = true;
		break;
	default:
		return;
	}
}
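
/*
 * For reference, a sketch of the matching above (exact bit layout lives in
 * drm_edid.h): edid_extract_panel_id() keeps the EDID's big-endian compressed
 * manufacturer id in the top 16 bits and the product code in the low 16 bits,
 * and drm_edid_encode_panel_id() packs its three ASCII letters into the same
 * compressed 5-bits-per-letter layout, so e.g.
 *
 *	drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E)
 *
 * matches a panel whose EDID reports vendor "SAM" and product code 0x0E5E.
 */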

/**
 * dm_helpers_parse_edid_caps() - Parse edid caps
 *
 * @link: current detected link
 * @edid: [in] pointer to edid
 * @edid_caps: [out] pointer to edid caps to be filled in
 *
 * Return: EDID parse status (EDID_OK on success)
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_link *link,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
			((uint16_t) edid_buf->mfg_id[1]) << 8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
			((uint16_t) edid_buf->prod_code[1]) << 8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	drm_edid_get_monitor_name(edid_buf,
				  edid_caps->display_name,
				  AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	edid_caps->edid_hdmi = connector->display_info.is_hdmi;

	apply_edid_quirks(edid_buf, edid_caps);

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT);
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}
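
/*
 * For reference: each CEA Short Audio Descriptor parsed above is three bytes
 * in the CEA-861 layout. The channel field stores the channel count minus one
 * (hence the "+ 1"), "freq" is the supported-sample-rate bitmask, and struct
 * cea_sad's "byte2" is the third descriptor byte, whose meaning depends on
 * the format code (supported sample sizes for LPCM).
 */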

static void
fill_dc_mst_payload_table_from_drm(struct dc_link *link,
				   bool enable,
				   struct drm_dp_mst_atomic_payload *target_payload,
				   struct dc_dp_mst_stream_allocation_table *table)
{
	struct dc_dp_mst_stream_allocation_table new_table = { 0 };
	struct dc_dp_mst_stream_allocation *sa;
	struct link_mst_stream_allocation_table copy_of_link_table =
			link->mst_stream_alloc_table;

	int i;
	int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
	struct link_mst_stream_allocation *dc_alloc;

	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible. */
	if (enable) {
		dc_alloc =
		&copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
		dc_alloc->vcp_id = target_payload->vcpi;
		dc_alloc->slot_count = target_payload->time_slots;
	} else {
		for (i = 0; i < copy_of_link_table.stream_count; i++) {
			dc_alloc =
			&copy_of_link_table.stream_allocations[i];

			if (dc_alloc->vcp_id == target_payload->vcpi) {
				dc_alloc->vcp_id = 0;
				dc_alloc->slot_count = 0;
				break;
			}
		}
		ASSERT(i != copy_of_link_table.stream_count);
	}

	/* Fill payload info */
	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
		dc_alloc =
			&copy_of_link_table.stream_allocations[i];
		if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
			sa = &new_table.stream_allocations[new_table.stream_count];
			sa->slot_count = dc_alloc->slot_count;
			sa->vcp_id = dc_alloc->vcp_id;
			new_table.stream_count++;
		}
	}

	/* Overwrite the old table */
	*table = new_table;
}

void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

static void dm_helpers_construct_old_payload(
			struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_topology_state *mst_state,
			struct drm_dp_mst_atomic_payload *new_payload,
			struct drm_dp_mst_atomic_payload *old_payload)
{
	struct drm_dp_mst_atomic_payload *pos;
	int pbn_per_slot = dfixed_trunc(mst_state->pbn_div);
	u8 next_payload_vc_start = mgr->next_start_slot;
	u8 payload_vc_start = new_payload->vc_start_slot;
	u8 allocated_time_slots;

	*old_payload = *new_payload;

	/* Set correct time_slots/PBN of old payload.
	 * other fields (delete & dsc_enabled) in
	 * struct drm_dp_mst_atomic_payload are don't care fields
	 * while calling drm_dp_remove_payload_part2()
	 */
	list_for_each_entry(pos, &mst_state->payloads, next) {
		if (pos != new_payload &&
		    pos->vc_start_slot > payload_vc_start &&
		    pos->vc_start_slot < next_payload_vc_start)
			next_payload_vc_start = pos->vc_start_slot;
	}

	allocated_time_slots = next_payload_vc_start - payload_vc_start;

	old_payload->time_slots = allocated_time_slots;
	old_payload->pbn = allocated_time_slots * pbn_per_slot;
}
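
/*
 * For reference, the slot math above with hypothetical values: with payloads
 * starting at VC slots 1, 21 and 41 and mgr->next_start_slot at 61, rebuilding
 * the old payload that started at slot 21 finds 41 as the closest following
 * start, so it must have spanned 41 - 21 = 20 time slots, i.e.
 * 20 * pbn_per_slot PBN.
 */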

/*
 * Writes payload allocation table in immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dc_dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
	struct drm_dp_mst_topology_mgr *mst_mgr;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit, guaranteeing that the state
	 * is not going to be swapped while still in use in commit tail
	 */

	if (!aconnector || !aconnector->mst_root)
		return false;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);

	if (enable) {
		target_payload = new_payload;

		/* It's OK for this to fail */
		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
	} else {
		/* construct old payload by VCPI */
		dm_helpers_construct_old_payload(mst_mgr, mst_state,
						 new_payload, &old_payload);
		target_payload = &old_payload;

		drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload);
	}

	/* mst_mgr->payloads holds the VC payloads used to notify the MST branch
	 * via DPCD or AUX messages. Slots 1-63 are allocated sequentially for
	 * each stream, and the AMD ASIC stream slot allocation should follow
	 * the same sequence. Copy the DRM MST allocation to dc.
	 */
	fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);

	return true;
}
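
/*
 * Note on sequencing: the part1 calls above only update the payload table at
 * the immediate downstream branch. The matching part2 steps that complete an
 * allocation or removal after the ACT are issued later, from
 * dm_helpers_dp_mst_send_payload_allocation() and
 * dm_helpers_dp_mst_update_mst_mgr_for_deallocation() below.
 */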

/*
 * Poll pending down reply
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Clear payload allocation table before enabling MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Polls for ACT (allocation change trigger) handled and sends
 * ALLOCATE_PAYLOAD message.
 */
enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return ACT_FAILED;

	mst_mgr = &aconnector->mst_root->mst_mgr;

	if (!mst_mgr->mst_state)
		return ACT_FAILED;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return ACT_FAILED;

	return ACT_SUCCESS;
}

void dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *new_payload;
	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
	int ret = 0;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);

	ret = drm_dp_add_payload_part2(mst_mgr, new_payload);

	if (ret) {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 set_flag, false);
	} else {
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 set_flag, true);
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 clr_flag, false);
	}
}

void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_atomic_payload *new_payload, old_payload;
	enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
	enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_root)
		return;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
	dm_helpers_construct_old_payload(mst_mgr, mst_state,
					 new_payload, &old_payload);

	drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);

	amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true);
	amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false);
}

void dm_dtn_log_begin(struct dc_context *ctx,
		      struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

__printf(3, 4)
void dm_dtn_log_append_v(struct dc_context *ctx,
			 struct dc_log_buffer_ctx *log_ctx,
			 const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);

		return;
	}

	/* Measure the output. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);

	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed. */
	total = log_ctx->pos + n + 1;

	if (total > log_ctx->size) {
		char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kvfree(log_ctx->buf);

			log_ctx->buf = buf;
			log_ctx->size = total;
		}
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg,
		args);
	va_end(args);

	if (n > 0)
		log_ctx->pos += n;
}
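
/*
 * For reference: dm_dtn_log_append_v() above follows the common two-pass
 * pattern. The first vsnprintf(NULL, 0, ...) call only measures the formatted
 * length so the buffer can be grown (to log_ctx->pos + n + 1, leaving room
 * for the terminating NUL); the second, vscnprintf() pass then writes into
 * the possibly reallocated buffer at log_ctx->pos.
 */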

void dm_dtn_log_end(struct dc_context *ctx,
		    struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (!log_ctx) {
		pr_info("%s", msg);
		return;
	}

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	int ret;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
			 aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
	if (ret < 0) {
		DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
		return false;
	}

	DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0],
		 aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK);

	return true;
}

bool dm_helpers_dp_mst_stop_top_mgr(
		struct dc_context *ctx,
		struct dc_link *link)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	if (aconnector->mst_mgr.mst_state == true) {
		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
		link->cur_link_settings.lane_count = 0;
	}

	return false;
}

bool dm_helpers_dp_read_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector)
		return false;

	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
				size) == size;
}

bool dm_helpers_dp_write_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector)
		return false;

	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
				 address, (uint8_t *)data, size) > 0;
}

bool dm_helpers_submit_i2c(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct i2c_command *cmd)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_msg *msgs;
	int i = 0;
	int num = cmd->number_of_payloads;
	bool result;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);

	if (!msgs)
		return false;

	for (i = 0; i < num; i++) {
		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
		msgs[i].addr = cmd->payloads[i].address;
		msgs[i].len = cmd->payloads[i].length;
		msgs[i].buf = cmd->payloads[i].data;
	}

	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

	kfree(msgs);

	return result;
}

static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
		bool is_write_cmd,
		unsigned char cmd,
		unsigned int length,
		unsigned int offset,
		unsigned char *data)
{
	bool success = false;
	unsigned char rc_data[16] = {0};
	unsigned char rc_offset[4] = {0};
	unsigned char rc_length[2] = {0};
	unsigned char rc_cmd = 0;
	unsigned char rc_result = 0xFF;
	unsigned char i = 0;
	int ret;

	if (is_write_cmd) {
		// write rc data
		memmove(rc_data, data, length);
		ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
		if (ret < 0)
			goto err;
	}

	// write rc offset
	rc_offset[0] = (unsigned char) offset & 0xFF;
	rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
	rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
	rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));
	if (ret < 0)
		goto err;

	// write rc length
	rc_length[0] = (unsigned char) length & 0xFF;
	rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
	if (ret < 0)
		goto err;

	// write rc cmd
	rc_cmd = cmd | 0x80;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
	if (ret < 0)
		goto err;

	// poll until active is 0
	for (i = 0; i < 10; i++) {
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
		if (rc_cmd == cmd)
			// active is 0
			break;
		msleep(10);
	}

	// read rc result
	drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
	success = (rc_result == 0);

	if (success && !is_write_cmd) {
		// read rc data
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
	}

	drm_dbg_dp(aux->drm_dev, "success = %d\n", success);

	return success;

err:
	DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
	return false;
}
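
/*
 * For reference, the RC register protocol used above (as inferred from the
 * helper itself): offset and length are written little-endian, so an offset
 * of 0x220998 goes out to SYNAPTICS_RC_OFFSET as {0x98, 0x09, 0x22, 0x00}.
 * Bit 7 of SYNAPTICS_RC_COMMAND (the cmd | 0x80 write) flags the command as
 * active; the sink clears it on completion, which is why the poll loop exits
 * once the register reads back as the bare cmd value.
 */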

static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
	unsigned char data[16] = {0};

	drm_dbg_dp(aux->drm_dev, "Start\n");

	// Step 2
	data[0] = 'P';
	data[1] = 'R';
	data[2] = 'I';
	data[3] = 'U';
	data[4] = 'S';

	if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
		return;

	// Step 3 and 4
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 3 and 5
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 6
	if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
		return;

	drm_dbg_dp(aux->drm_dev, "Done\n");
}

/* MST Dock */
static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";

static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
		struct drm_dp_aux *aux,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t ret = 0;

	drm_dbg_dp(aux->drm_dev,
		   "MST_DSC Configure DSC to non-virtual dpcd synaptics\n");

	if (enable) {
		/* If DSC was enabled on a previous boot and the system is
		 * rebooted with the hub attached, there is a chance that the
		 * Synaptics hub gets stuck during the reboot sequence.
		 * Apply a workaround to reset the Synaptics SDP FIFO before
		 * enabling the first stream.
		 */
		if (!stream->link->link_status.link_active &&
		    memcmp(stream->link->dpcd_caps.branch_dev_name,
			   (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
			apply_synaptics_fifo_reset_wa(aux);

		ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
		DRM_INFO("MST_DSC Send DSC enable to synaptics\n");

	} else {
		/* The Synaptics hub does not support virtual DPCD, and the
		 * external monitor shows garbage if DSC is disabled while the
		 * link is still up, so only disable DSC once the entire link
		 * status has gone inactive.
		 */
		if (!stream->link->link_status.link_active) {
			ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
			DRM_INFO("MST_DSC Send DSC disable to synaptics\n");
		}
	}

	return ret;
}

bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	static const uint8_t DSC_DISABLE;
	static const uint8_t DSC_DECODING = 0x01;
	static const uint8_t DSC_PASSTHROUGH = 0x02;

	struct amdgpu_dm_connector *aconnector =
		(struct amdgpu_dm_connector *)stream->dm_stream_context;
	struct drm_device *dev = aconnector->base.dev;
	struct drm_dp_mst_port *port;
	uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
	uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
	uint8_t ret = 0;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		if (!aconnector->dsc_aux)
			return false;

		// apply w/a to synaptics
		if (needs_dsc_aux_workaround(aconnector->dc_link) &&
		    (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
			return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
				aconnector->dsc_aux, stream, enable_dsc);

		port = aconnector->mst_output_port;

		if (enable) {
			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
					   ret);
			}

			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);
		} else {
			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);

			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
					   ret);
			}
		}
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to SST RX\n",
				   enable_dsc ? "enable" : "disable");
		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to DP-HDMI PCON\n",
				   enable_dsc ? "enable" : "disable");
		}
	}

	return ret;
}
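
/*
 * Note the DPCD write ordering in the MST branch above: on enable, DSC
 * pass-through is turned on at the last branch device first and decoding at
 * the endpoint second; on disable the order is reversed, so the endpoint
 * stops decoding before pass-through is torn down.
 */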
"enable" : "disable"); 882 } 883 } 884 885 return ret; 886 } 887 888 bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream) 889 { 890 // TODO 891 return false; 892 } 893 894 bool dm_helpers_is_dp_sink_present(struct dc_link *link) 895 { 896 bool dp_sink_present; 897 struct amdgpu_dm_connector *aconnector = link->priv; 898 899 if (!aconnector) { 900 BUG_ON("Failed to find connector for link!"); 901 return true; 902 } 903 904 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex); 905 dp_sink_present = dc_link_is_dp_sink_present(link); 906 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex); 907 return dp_sink_present; 908 } 909 910 static int 911 dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len) 912 { 913 struct drm_connector *connector = data; 914 struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev); 915 unsigned char start = block * EDID_LENGTH; 916 struct edid *edid; 917 int r; 918 919 if (!acpidev) 920 return -ENODEV; 921 922 /* fetch the entire edid from BIOS */ 923 r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid); 924 if (r < 0) { 925 drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r); 926 return r; 927 } 928 if (len > r || start > r || start + len > r) { 929 r = -EINVAL; 930 goto cleanup; 931 } 932 933 /* sanity check */ 934 if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) || 935 (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) { 936 r = -EINVAL; 937 goto cleanup; 938 } 939 940 memcpy(buf, (void *)edid + start, len); 941 r = 0; 942 943 cleanup: 944 kfree(edid); 945 946 return r; 947 } 948 949 static const struct drm_edid * 950 dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector) 951 { 952 struct drm_connector *connector = &aconnector->base; 953 954 if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID) 955 return NULL; 956 957 switch (connector->connector_type) { 958 case DRM_MODE_CONNECTOR_LVDS: 959 case DRM_MODE_CONNECTOR_eDP: 960 break; 961 default: 962 return NULL; 963 } 964 965 if (connector->force == DRM_FORCE_OFF) 966 return NULL; 967 968 return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); 969 } 970 971 enum dc_edid_status dm_helpers_read_local_edid( 972 struct dc_context *ctx, 973 struct dc_link *link, 974 struct dc_sink *sink) 975 { 976 struct amdgpu_dm_connector *aconnector = link->priv; 977 struct drm_connector *connector = &aconnector->base; 978 struct i2c_adapter *ddc; 979 int retry = 3; 980 enum dc_edid_status edid_status; 981 const struct drm_edid *drm_edid; 982 const struct edid *edid; 983 984 if (link->aux_mode) 985 ddc = &aconnector->dm_dp_aux.aux.ddc; 986 else 987 ddc = &aconnector->i2c->base; 988 989 /* some dongles read edid incorrectly the first time, 990 * do check sum and retry to make sure read correct edid. 

enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	const struct drm_edid *drm_edid;
	const struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* Some dongles read the EDID incorrectly the first time; verify the
	 * checksum and retry to make sure we read a correct EDID.
	 */
	do {
		drm_edid = dm_helpers_read_acpi_edid(aconnector);
		if (drm_edid)
			drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name);
		else
			drm_edid = drm_edid_read_ddc(connector, ddc);
		drm_edid_connector_update(connector, drm_edid);

		/* DP Compliance Test 4.2.2.6 */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!drm_edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!drm_edid)
			return EDID_NO_RESPONSE;

		edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		drm_edid_free(drm_edid);

		edid_status = dm_helpers_parse_edid_caps(
						link,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s\n",
			  edid_status,
			  aconnector->base.name);
	if (link->aux_mode) {
		union test_request test_request = {0};
		union test_response test_response = {0};

		dm_helpers_dp_read_dpcd(ctx,
					link,
					DP_TEST_REQUEST,
					&test_request.raw,
					sizeof(union test_request));

		if (!test_request.bits.EDID_READ)
			return edid_status;

		test_response.bits.EDID_CHECKSUM_WRITE = 1;

		dm_helpers_dp_write_dpcd(ctx,
					 link,
					 DP_TEST_EDID_CHECKSUM,
					 &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
					 1);

		dm_helpers_dp_write_dpcd(ctx,
					 link,
					 DP_TEST_RESPONSE,
					 &test_response.raw,
					 sizeof(test_response));
	}

	return edid_status;
}
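
/*
 * For reference: sink->dc_edid.length above is EDID_LENGTH * (extensions + 1),
 * i.e. 128 bytes for the base block plus 128 per extension, so a typical EDID
 * with one CTA extension block copies 256 bytes into the sink. The checksum
 * byte handed back for DP compliance is the last byte of that buffer.
 */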

int dm_helper_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	if (!link->hpd_status) {
		*operation_result = AUX_RET_ERROR_HPD_DISCON;
		return -1;
	}

	return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
							operation_result);
}

int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
		const struct dc_link *link,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload,
						      operation_result);
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}

void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}

void dm_helpers_init_panel_settings(
		struct dc_context *ctx,
		struct dc_panel_config *panel_config,
		struct dc_sink *sink)
{
	// Extra Panel Power Sequence
	panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
	panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
	panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
	panel_config->pps.extra_post_t7_ms = 0;
	panel_config->pps.extra_pre_t11_ms = 0;
	panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
	panel_config->pps.extra_post_OUI_ms = 0;
	// Feature DSC
	panel_config->dsc.disable_dsc_edp = false;
	panel_config->dsc.force_dsc_edp_policy = 0;
}

void dm_helpers_override_panel_settings(
		struct dc_context *ctx,
		struct dc_panel_config *panel_config)
{
	// Feature DSC
	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		panel_config->dsc.disable_dsc_edp = true;
}

void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct amdgpu_device *adev = ctx->driver_context;

	return dm_allocate_gpu_mem(adev, type, size, addr);
}

void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct amdgpu_device *adev = ctx->driver_context;

	dm_free_gpu_mem(adev, type, pvMem);
}

bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
{
	enum dc_irq_source irq_source;
	bool ret;

	irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;

	ret = dc_interrupt_set(ctx->dc, irq_source, enable);

	DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
			 enable ? "en" : "dis", ret);
	return ret;
}

void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
{
	/* TODO: virtual DPCD */
	struct dc_link *link = stream->link;
	union down_spread_ctrl old_downspread;
	union down_spread_ctrl new_downspread;

	if (link->aux_access_disabled)
		return;

	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
				     &old_downspread.raw,
				     sizeof(old_downspread)))
		return;

	new_downspread.raw = old_downspread.raw;
	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
		(stream->ignore_msa_timing_param) ? 1 : 0;

	if (new_downspread.raw != old_downspread.raw)
		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
					 &new_downspread.raw,
					 sizeof(new_downspread));
}

bool dm_helpers_dp_handle_test_pattern_request(
		struct dc_context *ctx,
		const struct dc_link *link,
		union link_test_pattern dpcd_test_pattern,
		union test_misc dpcd_test_params)
{
	enum dp_test_pattern test_pattern;
	enum dp_test_pattern_color_space test_pattern_color_space =
			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
	enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
	struct pipe_ctx *pipe_ctx = NULL;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_device *dev = aconnector->base.dev;
	struct dc_state *dc_state = ctx->dc->current_state;
	struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream == NULL)
			continue;

		if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
		    !pipes[i].prev_odm_pipe) {
			pipe_ctx = &pipes[i];
			break;
		}
	}

	if (pipe_ctx == NULL)
		return false;

	switch (dpcd_test_pattern.bits.PATTERN) {
	case LINK_TEST_PATTERN_COLOR_RAMP:
		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
		break;
	case LINK_TEST_PATTERN_VERTICAL_BARS:
		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
		break; /* black and white */
	case LINK_TEST_PATTERN_COLOR_SQUARES:
		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
				TEST_DYN_RANGE_VESA ?
				DP_TEST_PATTERN_COLOR_SQUARES :
				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
		break;
	default:
		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
		break;
	}

	if (dpcd_test_params.bits.CLR_FORMAT == 0)
		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
	else
		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;

	switch (dpcd_test_params.bits.BPC) {
	case 0: // 6 bits
		requestColorDepth = COLOR_DEPTH_666;
		break;
	case 1: // 8 bits
		requestColorDepth = COLOR_DEPTH_888;
		break;
	case 2: // 10 bits
		requestColorDepth = COLOR_DEPTH_101010;
		break;
	case 3: // 12 bits
		requestColorDepth = COLOR_DEPTH_121212;
		break;
	default:
		break;
	}

	switch (dpcd_test_params.bits.CLR_FORMAT) {
	case 0:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	case 1:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
		break;
	case 2:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
		break;
	default:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	}

	if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
	     && pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
	    || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
		&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
		drm_dbg(dev,
			"original bpc %d pix encoding %d, changing to %d %d\n",
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->timing.pixel_encoding,
			requestColorDepth,
			requestPixelEncoding);
		pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
		pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;

		dc_link_update_dsc_config(pipe_ctx);

		aconnector->timing_changed = true;
		/* store current timing */
		if (aconnector->timing_requested)
			*aconnector->timing_requested = pipe_ctx->stream->timing;
		else
			drm_err(dev, "timing storage failed\n");
	}

	pipe_ctx->stream->test_pattern.type = test_pattern;
	pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;

	/* Temp W/A for compliance test failure */
	dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
		clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
	dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
	ctx->dc->clk_mgr->funcs->update_clocks(
			ctx->dc->clk_mgr,
			dc_state,
			false);

	dc_link_dp_set_test_pattern(
		(struct dc_link *) link,
		test_pattern,
		test_pattern_color_space,
		NULL,
		NULL,
		0);

	return false;
}

void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	// TODO
}

void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (adev->dm.idle_workqueue) {
		adev->dm.idle_workqueue->enable = enable;
		if (enable && !adev->dm.idle_workqueue->running && amdgpu_dm_is_headless(adev))
			schedule_work(&adev->dm.idle_workqueue->work);
	}
}

void dm_helpers_dp_mst_update_branch_bandwidth(
		struct dc_context *ctx,
		struct dc_link *link)
{
	// TODO
}

static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
{
	bool ret_val = false;

	switch (branch_dev_id) {
	case DP_BRANCH_DEVICE_ID_0060AD:
	case DP_BRANCH_DEVICE_ID_00E04C:
	case DP_BRANCH_DEVICE_ID_90CC24:
		ret_val = true;
		break;
	default:
		break;
	}

	return ret_val;
}

enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
{
	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

	switch (dpcd_caps->dongle_type) {
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
		if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true &&
		    dpcd_caps->allow_invalid_MSA_timing_param == true &&
		    dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id))
			as_type = FREESYNC_TYPE_PCON_IN_WHITELIST;
		break;
	default:
		break;
	}

	return as_type;
}

bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream)
{
	// TODO
	return false;
}

bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream)
{
	// TODO
	return false;
}