/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM invokes when an interrupt fires. This
 * generic handler looks up the IRQ table and calls the respective
 * &amdgpu_irq_src_funcs.process hooks.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen in
 * the hooks that DM provides for &amdgpu_irq_src_funcs.process: they are all
 * set to the DM generic handler amdgpu_dm_irq_handler(), which looks up DM's
 * IRQ tables. However, in order for the base driver to recognize this hook,
 * DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
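/*
 * To make the layering described above concrete: every
 * &amdgpu_irq_src_funcs instance defined later in this file
 * (e.g. dm_crtc_irq_funcs) follows the same pattern:
 *
 *	static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
 *		.set = amdgpu_dm_set_crtc_irq_state,	// HW toggle for base driver
 *		.process = amdgpu_dm_irq_handler,	// generic DM dispatcher
 *	};
 *
 * amdgpu_dm_irq_handler() then fans the interrupt out to the handlers that
 * were registered in DM's own high/low tables via
 * amdgpu_dm_irq_register_interrupt().
 */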
70 * 71 * @list: Linked list entry referencing the next/previous handler 72 * @handler: Handler function 73 * @handler_arg: Argument passed to the handler when triggered 74 * @dm: DM which this handler belongs to 75 * @irq_source: DC interrupt source that this handler is registered for 76 * @work: work struct 77 */ 78 struct amdgpu_dm_irq_handler_data { 79 struct list_head list; 80 interrupt_handler handler; 81 void *handler_arg; 82 83 struct amdgpu_display_manager *dm; 84 /* DAL irq source which registered for this interrupt. */ 85 enum dc_irq_source irq_source; 86 struct work_struct work; 87 }; 88 89 #define DM_IRQ_TABLE_LOCK(adev, flags) \ 90 spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags) 91 92 #define DM_IRQ_TABLE_UNLOCK(adev, flags) \ 93 spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags) 94 95 /****************************************************************************** 96 * Private functions. 97 *****************************************************************************/ 98 99 static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, 100 void (*ih)(void *), 101 void *args, 102 struct amdgpu_display_manager *dm) 103 { 104 hcd->handler = ih; 105 hcd->handler_arg = args; 106 hcd->dm = dm; 107 } 108 109 /** 110 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper. 111 * 112 * @work: work struct 113 */ 114 static void dm_irq_work_func(struct work_struct *work) 115 { 116 struct amdgpu_dm_irq_handler_data *handler_data = 117 container_of(work, struct amdgpu_dm_irq_handler_data, work); 118 119 handler_data->handler(handler_data->handler_arg); 120 121 /* Call a DAL subcomponent which registered for interrupt notification 122 * at INTERRUPT_LOW_IRQ_CONTEXT. 123 * (The most common use is HPD interrupt) 124 */ 125 } 126 127 /* 128 * Remove a handler and return a pointer to handler list from which the 129 * handler was removed. 130 */ 131 static struct list_head *remove_irq_handler(struct amdgpu_device *adev, 132 void *ih, 133 const struct dc_interrupt_params *int_params) 134 { 135 struct list_head *hnd_list; 136 struct list_head *entry, *tmp; 137 struct amdgpu_dm_irq_handler_data *handler; 138 unsigned long irq_table_flags; 139 bool handler_removed = false; 140 enum dc_irq_source irq_source; 141 142 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 143 144 irq_source = int_params->irq_source; 145 146 switch (int_params->int_context) { 147 case INTERRUPT_HIGH_IRQ_CONTEXT: 148 hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; 149 break; 150 case INTERRUPT_LOW_IRQ_CONTEXT: 151 default: 152 hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; 153 break; 154 } 155 156 list_for_each_safe(entry, tmp, hnd_list) { 157 158 handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, 159 list); 160 161 if (handler == NULL) 162 continue; 163 164 if (ih == handler->handler) { 165 /* Found our handler. Remove it from the list. */ 166 list_del(&handler->list); 167 handler_removed = true; 168 break; 169 } 170 } 171 172 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 173 174 if (handler_removed == false) { 175 /* Not necessarily an error - caller may not 176 * know the context. 
177 */ 178 return NULL; 179 } 180 181 kfree(handler); 182 183 DRM_DEBUG_KMS( 184 "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n", 185 ih, int_params->irq_source, int_params->int_context); 186 187 return hnd_list; 188 } 189 190 /** 191 * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table 192 * @adev: The base driver device containing the DM device 193 * 194 * Go through low and high context IRQ tables and deallocate handlers. 195 */ 196 static void unregister_all_irq_handlers(struct amdgpu_device *adev) 197 { 198 struct list_head *hnd_list_low; 199 struct list_head *hnd_list_high; 200 struct list_head *entry, *tmp; 201 struct amdgpu_dm_irq_handler_data *handler; 202 unsigned long irq_table_flags; 203 int i; 204 205 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 206 207 for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) { 208 hnd_list_low = &adev->dm.irq_handler_list_low_tab[i]; 209 hnd_list_high = &adev->dm.irq_handler_list_high_tab[i]; 210 211 list_for_each_safe(entry, tmp, hnd_list_low) { 212 213 handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, 214 list); 215 216 if (handler == NULL || handler->handler == NULL) 217 continue; 218 219 list_del(&handler->list); 220 kfree(handler); 221 } 222 223 list_for_each_safe(entry, tmp, hnd_list_high) { 224 225 handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, 226 list); 227 228 if (handler == NULL || handler->handler == NULL) 229 continue; 230 231 list_del(&handler->list); 232 kfree(handler); 233 } 234 } 235 236 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 237 } 238 239 static bool 240 validate_irq_registration_params(struct dc_interrupt_params *int_params, 241 void (*ih)(void *)) 242 { 243 if (NULL == int_params || NULL == ih) { 244 DRM_ERROR("DM_IRQ: invalid input!\n"); 245 return false; 246 } 247 248 if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) { 249 DRM_ERROR("DM_IRQ: invalid context: %d!\n", 250 int_params->int_context); 251 return false; 252 } 253 254 if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) { 255 DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", 256 int_params->irq_source); 257 return false; 258 } 259 260 return true; 261 } 262 263 static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, 264 irq_handler_idx handler_idx) 265 { 266 if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) { 267 DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n"); 268 return false; 269 } 270 271 if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) { 272 DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source); 273 return false; 274 } 275 276 return true; 277 } 278 /****************************************************************************** 279 * Public functions. 280 * 281 * Note: caller is responsible for input validation. 282 *****************************************************************************/ 283 284 /** 285 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM. 286 * @adev: The base driver device containing the DM device. 287 * @int_params: Interrupt parameters containing the source, and handler context 288 * @ih: Function pointer to the interrupt handler to register 289 * @handler_args: Arguments passed to the handler when the interrupt occurs 290 * 291 * Register an interrupt handler for the given IRQ source, under the given 292 * context. The context can either be high or low. High context handlers are 293 * executed directly within ISR context, while low context is executed within a 294 * workqueue, thereby allowing operations that sleep. 
295 * 296 * Registered handlers are called in a FIFO manner, i.e. the most recently 297 * registered handler will be called first. 298 * 299 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ 300 * source, handler function, and args 301 */ 302 void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, 303 struct dc_interrupt_params *int_params, 304 void (*ih)(void *), 305 void *handler_args) 306 { 307 struct list_head *hnd_list; 308 struct amdgpu_dm_irq_handler_data *handler_data; 309 unsigned long irq_table_flags; 310 enum dc_irq_source irq_source; 311 312 if (false == validate_irq_registration_params(int_params, ih)) 313 return DAL_INVALID_IRQ_HANDLER_IDX; 314 315 handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL); 316 if (!handler_data) { 317 DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); 318 return DAL_INVALID_IRQ_HANDLER_IDX; 319 } 320 321 init_handler_common_data(handler_data, ih, handler_args, &adev->dm); 322 323 irq_source = int_params->irq_source; 324 325 handler_data->irq_source = irq_source; 326 327 /* Lock the list, add the handler. */ 328 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 329 330 switch (int_params->int_context) { 331 case INTERRUPT_HIGH_IRQ_CONTEXT: 332 hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; 333 break; 334 case INTERRUPT_LOW_IRQ_CONTEXT: 335 default: 336 hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; 337 INIT_WORK(&handler_data->work, dm_irq_work_func); 338 break; 339 } 340 341 list_add_tail(&handler_data->list, hnd_list); 342 343 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 344 345 /* This pointer will be stored by code which requested interrupt 346 * registration. 347 * The same pointer will be needed in order to unregister the 348 * interrupt. 349 */ 350 351 DRM_DEBUG_KMS( 352 "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", 353 handler_data, 354 irq_source, 355 int_params->int_context); 356 357 return handler_data; 358 } 359 360 /** 361 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table 362 * @adev: The base driver device containing the DM device 363 * @irq_source: IRQ source to remove the given handler from 364 * @ih: Function pointer to the interrupt handler to unregister 365 * 366 * Go through both low and high context IRQ tables, and find the given handler 367 * for the given irq source. If found, remove it. Otherwise, do nothing. 368 */ 369 void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, 370 enum dc_irq_source irq_source, 371 void *ih) 372 { 373 struct list_head *handler_list; 374 struct dc_interrupt_params int_params; 375 int i; 376 377 if (false == validate_irq_unregistration_params(irq_source, ih)) 378 return; 379 380 memset(&int_params, 0, sizeof(int_params)); 381 382 int_params.irq_source = irq_source; 383 384 for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) { 385 386 int_params.int_context = i; 387 388 handler_list = remove_irq_handler(adev, ih, &int_params); 389 390 if (handler_list != NULL) 391 break; 392 } 393 394 if (handler_list == NULL) { 395 /* If we got here, it means we searched all irq contexts 396 * for this irq source, but the handler was not found. 397 */ 398 DRM_ERROR( 399 "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", 400 ih, irq_source); 401 } 402 } 403 404 /** 405 * amdgpu_dm_irq_init() - Initialize DM IRQ management 406 * @adev: The base driver device containing the DM device 407 * 408 * Initialize DM's high and low context IRQ tables. 
409 * 410 * The N by M table contains N IRQ sources, with M 411 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The 412 * list_heads are initialized here. When an interrupt n is triggered, all m 413 * handlers are called in sequence, FIFO according to registration order. 414 * 415 * The low context table requires special steps to initialize, since handlers 416 * will be deferred to a workqueue. See &struct irq_list_head. 417 */ 418 int amdgpu_dm_irq_init(struct amdgpu_device *adev) 419 { 420 int src; 421 struct list_head *lh; 422 423 DRM_DEBUG_KMS("DM_IRQ\n"); 424 425 spin_lock_init(&adev->dm.irq_handler_list_table_lock); 426 427 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 428 /* low context handler list init */ 429 lh = &adev->dm.irq_handler_list_low_tab[src]; 430 INIT_LIST_HEAD(lh); 431 /* high context handler init */ 432 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); 433 } 434 435 return 0; 436 } 437 438 /** 439 * amdgpu_dm_irq_fini() - Tear down DM IRQ management 440 * @adev: The base driver device containing the DM device 441 * 442 * Flush all work within the low context IRQ table. 443 */ 444 void amdgpu_dm_irq_fini(struct amdgpu_device *adev) 445 { 446 int src; 447 struct list_head *lh; 448 struct list_head *entry, *tmp; 449 struct amdgpu_dm_irq_handler_data *handler; 450 unsigned long irq_table_flags; 451 452 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); 453 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 454 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 455 /* The handler was removed from the table, 456 * it means it is safe to flush all the 'work' 457 * (because no code can schedule a new one). 458 */ 459 lh = &adev->dm.irq_handler_list_low_tab[src]; 460 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 461 462 if (!list_empty(lh)) { 463 list_for_each_safe(entry, tmp, lh) { 464 handler = list_entry( 465 entry, 466 struct amdgpu_dm_irq_handler_data, 467 list); 468 flush_work(&handler->work); 469 } 470 } 471 } 472 /* Deallocate handlers from the table. */ 473 unregister_all_irq_handlers(adev); 474 } 475 476 void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) 477 { 478 int src; 479 struct list_head *hnd_list_h; 480 struct list_head *hnd_list_l; 481 unsigned long irq_table_flags; 482 struct list_head *entry, *tmp; 483 struct amdgpu_dm_irq_handler_data *handler; 484 485 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 486 487 DRM_DEBUG_KMS("DM_IRQ: suspend\n"); 488 489 /** 490 * Disable HW interrupt for HPD and HPDRX only since FLIP and VBLANK 491 * will be disabled from manage_dm_interrupts on disable CRTC. 
492 */ 493 for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) { 494 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; 495 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; 496 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) 497 dc_interrupt_set(adev->dm.dc, src, false); 498 499 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 500 501 if (!list_empty(hnd_list_l)) { 502 list_for_each_safe(entry, tmp, hnd_list_l) { 503 handler = list_entry( 504 entry, 505 struct amdgpu_dm_irq_handler_data, 506 list); 507 flush_work(&handler->work); 508 } 509 } 510 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 511 } 512 513 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 514 } 515 516 void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) 517 { 518 int src; 519 struct list_head *hnd_list_h, *hnd_list_l; 520 unsigned long irq_table_flags; 521 522 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 523 524 drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n"); 525 526 /* re-enable short pulse interrupts HW interrupt */ 527 for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) { 528 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; 529 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; 530 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) 531 dc_interrupt_set(adev->dm.dc, src, true); 532 } 533 534 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 535 } 536 537 void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) 538 { 539 int src; 540 struct list_head *hnd_list_h, *hnd_list_l; 541 unsigned long irq_table_flags; 542 543 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 544 545 drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n"); 546 547 /** 548 * Renable HW interrupt for HPD and only since FLIP and VBLANK 549 * will be enabled from manage_dm_interrupts on enable CRTC. 550 */ 551 for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) { 552 hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; 553 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; 554 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) 555 dc_interrupt_set(adev->dm.dc, src, true); 556 } 557 558 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 559 } 560 561 /* 562 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the 563 * "irq_source". 
564 */ 565 static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, 566 enum dc_irq_source irq_source) 567 { 568 struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source]; 569 struct amdgpu_dm_irq_handler_data *handler_data; 570 bool work_queued = false; 571 572 if (list_empty(handler_list)) 573 return; 574 575 list_for_each_entry(handler_data, handler_list, list) { 576 if (queue_work(system_highpri_wq, &handler_data->work)) { 577 work_queued = true; 578 break; 579 } 580 } 581 582 if (!work_queued) { 583 struct amdgpu_dm_irq_handler_data *handler_data_add; 584 /*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/ 585 handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list); 586 587 /*allocate a new amdgpu_dm_irq_handler_data*/ 588 handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC); 589 if (!handler_data_add) { 590 DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); 591 return; 592 } 593 594 /*copy new amdgpu_dm_irq_handler_data members from handler_data*/ 595 handler_data_add->handler = handler_data->handler; 596 handler_data_add->handler_arg = handler_data->handler_arg; 597 handler_data_add->dm = handler_data->dm; 598 handler_data_add->irq_source = irq_source; 599 600 list_add_tail(&handler_data_add->list, handler_list); 601 602 INIT_WORK(&handler_data_add->work, dm_irq_work_func); 603 604 if (queue_work(system_highpri_wq, &handler_data_add->work)) 605 DRM_DEBUG("Queued work for handling interrupt from " 606 "display for IRQ source %d\n", 607 irq_source); 608 else 609 DRM_ERROR("Failed to queue work for handling interrupt " 610 "from display for IRQ source %d\n", 611 irq_source); 612 } 613 } 614 615 /* 616 * amdgpu_dm_irq_immediate_work 617 * Callback high irq work immediately, don't send to work queue 618 */ 619 static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, 620 enum dc_irq_source irq_source) 621 { 622 struct amdgpu_dm_irq_handler_data *handler_data; 623 unsigned long irq_table_flags; 624 625 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 626 627 list_for_each_entry(handler_data, 628 &adev->dm.irq_handler_list_high_tab[irq_source], 629 list) { 630 /* Call a subcomponent which registered for immediate 631 * interrupt notification 632 */ 633 handler_data->handler(handler_data->handler_arg); 634 } 635 636 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 637 } 638 639 /** 640 * amdgpu_dm_irq_handler - Generic DM IRQ handler 641 * @adev: amdgpu base driver device containing the DM device 642 * @source: Unused 643 * @entry: Data about the triggered interrupt 644 * 645 * Calls all registered high irq work immediately, and schedules work for low 646 * irq. The DM IRQ table is used to find the corresponding handlers. 
647 */ 648 static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, 649 struct amdgpu_irq_src *source, 650 struct amdgpu_iv_entry *entry) 651 { 652 653 enum dc_irq_source src = 654 dc_interrupt_to_irq_source( 655 adev->dm.dc, 656 entry->src_id, 657 entry->src_data[0]); 658 659 dc_interrupt_ack(adev->dm.dc, src); 660 661 /* Call high irq work immediately */ 662 amdgpu_dm_irq_immediate_work(adev, src); 663 /*Schedule low_irq work */ 664 amdgpu_dm_irq_schedule_work(adev, src); 665 666 return 0; 667 } 668 669 static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type) 670 { 671 switch (type) { 672 case AMDGPU_HPD_1: 673 return DC_IRQ_SOURCE_HPD1; 674 case AMDGPU_HPD_2: 675 return DC_IRQ_SOURCE_HPD2; 676 case AMDGPU_HPD_3: 677 return DC_IRQ_SOURCE_HPD3; 678 case AMDGPU_HPD_4: 679 return DC_IRQ_SOURCE_HPD4; 680 case AMDGPU_HPD_5: 681 return DC_IRQ_SOURCE_HPD5; 682 case AMDGPU_HPD_6: 683 return DC_IRQ_SOURCE_HPD6; 684 default: 685 return DC_IRQ_SOURCE_INVALID; 686 } 687 } 688 689 static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, 690 struct amdgpu_irq_src *source, 691 unsigned int type, 692 enum amdgpu_interrupt_state state) 693 { 694 enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); 695 bool st = (state == AMDGPU_IRQ_STATE_ENABLE); 696 697 dc_interrupt_set(adev->dm.dc, src, st); 698 return 0; 699 } 700 701 static inline int dm_irq_state(struct amdgpu_device *adev, 702 struct amdgpu_irq_src *source, 703 unsigned int crtc_id, 704 enum amdgpu_interrupt_state state, 705 const enum irq_type dal_irq_type, 706 const char *func) 707 { 708 bool st; 709 enum dc_irq_source irq_source; 710 struct dc *dc = adev->dm.dc; 711 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; 712 713 if (!acrtc) { 714 DRM_ERROR( 715 "%s: crtc is NULL at id :%d\n", 716 func, 717 crtc_id); 718 return 0; 719 } 720 721 if (acrtc->otg_inst == -1) 722 return 0; 723 724 irq_source = dal_irq_type + acrtc->otg_inst; 725 726 st = (state == AMDGPU_IRQ_STATE_ENABLE); 727 728 if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) 729 dc_allow_idle_optimizations(dc, false); 730 731 dc_interrupt_set(adev->dm.dc, irq_source, st); 732 return 0; 733 } 734 735 static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, 736 struct amdgpu_irq_src *source, 737 unsigned int crtc_id, 738 enum amdgpu_interrupt_state state) 739 { 740 return dm_irq_state( 741 adev, 742 source, 743 crtc_id, 744 state, 745 IRQ_TYPE_PFLIP, 746 __func__); 747 } 748 749 static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, 750 struct amdgpu_irq_src *source, 751 unsigned int crtc_id, 752 enum amdgpu_interrupt_state state) 753 { 754 return dm_irq_state( 755 adev, 756 source, 757 crtc_id, 758 state, 759 IRQ_TYPE_VBLANK, 760 __func__); 761 } 762 763 static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev, 764 struct amdgpu_irq_src *source, 765 unsigned int crtc_id, 766 enum amdgpu_interrupt_state state) 767 { 768 return dm_irq_state( 769 adev, 770 source, 771 crtc_id, 772 state, 773 IRQ_TYPE_VLINE0, 774 __func__); 775 } 776 777 static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev, 778 struct amdgpu_irq_src *source, 779 unsigned int crtc_id, 780 enum amdgpu_interrupt_state state) 781 { 782 enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; 783 bool st = (state == AMDGPU_IRQ_STATE_ENABLE); 784 785 dc_interrupt_set(adev->dm.dc, irq_source, st); 786 return 0; 787 } 788 789 static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev, 790 
static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VUPDATE,
		__func__);
}

static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
	.set = amdgpu_dm_set_vline0_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = {
	.set = amdgpu_dm_set_dmub_outbox_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
	.set = amdgpu_dm_set_dmub_trace_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;

	adev->dmub_outbox_irq.num_types = 1;
	adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->dmub_trace_irq.num_types = 1;
	adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

void amdgpu_dm_outbox_init(struct amdgpu_device *adev)
{
	dc_interrupt_set(adev->dm.dc,
			 DC_IRQ_SOURCE_DMCUB_OUTBOX,
			 true);
}
887 */ 888 void amdgpu_dm_hpd_init(struct amdgpu_device *adev) 889 { 890 struct drm_device *dev = adev_to_drm(adev); 891 struct drm_connector *connector; 892 struct drm_connector_list_iter iter; 893 int irq_type; 894 int i; 895 896 /* First, clear all hpd and hpdrx interrupts */ 897 for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) { 898 if (!dc_interrupt_set(adev->dm.dc, i, false)) 899 drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n", 900 i); 901 } 902 903 drm_connector_list_iter_begin(dev, &iter); 904 drm_for_each_connector_iter(connector, &iter) { 905 struct amdgpu_dm_connector *amdgpu_dm_connector; 906 const struct dc_link *dc_link; 907 908 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 909 continue; 910 911 amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 912 913 dc_link = amdgpu_dm_connector->dc_link; 914 915 /* 916 * Get a base driver irq reference for hpd ints for the lifetime 917 * of dm. Note that only hpd interrupt types are registered with 918 * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on 919 * hpd_rx isn't available. DM currently controls hpd_rx 920 * explicitly with dc_interrupt_set() 921 */ 922 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 923 irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; 924 /* 925 * TODO: There's a mismatch between mode_info.num_hpd 926 * and what bios reports as the # of connectors with hpd 927 * sources. Since the # of hpd source types registered 928 * with base driver == mode_info.num_hpd, we have to 929 * fallback to dc_interrupt_set for the remaining types. 930 */ 931 if (irq_type < adev->mode_info.num_hpd) { 932 if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type)) 933 drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", 934 dc_link->irq_source_hpd); 935 } else { 936 dc_interrupt_set(adev->dm.dc, 937 dc_link->irq_source_hpd, 938 true); 939 } 940 } 941 942 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 943 dc_interrupt_set(adev->dm.dc, 944 dc_link->irq_source_hpd_rx, 945 true); 946 } 947 } 948 drm_connector_list_iter_end(&iter); 949 } 950 951 /** 952 * amdgpu_dm_hpd_fini - hpd tear down callback. 953 * 954 * @adev: amdgpu_device pointer 955 * 956 * Tear down the hpd pins used by the card (evergreen+). 957 * Disable the hpd interrupts. 
958 */ 959 void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) 960 { 961 struct drm_device *dev = adev_to_drm(adev); 962 struct drm_connector *connector; 963 struct drm_connector_list_iter iter; 964 int irq_type; 965 966 drm_connector_list_iter_begin(dev, &iter); 967 drm_for_each_connector_iter(connector, &iter) { 968 struct amdgpu_dm_connector *amdgpu_dm_connector; 969 const struct dc_link *dc_link; 970 971 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 972 continue; 973 974 amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 975 dc_link = amdgpu_dm_connector->dc_link; 976 977 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 978 irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; 979 980 /* TODO: See same TODO in amdgpu_dm_hpd_init() */ 981 if (irq_type < adev->mode_info.num_hpd) { 982 if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type)) 983 drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", 984 dc_link->irq_source_hpd); 985 } else { 986 dc_interrupt_set(adev->dm.dc, 987 dc_link->irq_source_hpd, 988 false); 989 } 990 } 991 992 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 993 dc_interrupt_set(adev->dm.dc, 994 dc_link->irq_source_hpd_rx, 995 false); 996 } 997 } 998 drm_connector_list_iter_end(&iter); 999 } 1000