// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_rpm.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_pipe_crc_regs.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_uncore.h"

/*
 * Program an IMR/IER/IIR interrupt register triplet via gen2_irq_init(),
 * taking a DMC wakelock reference on each of the three registers for the
 * duration of the MMIO accesses.
 */
static void
intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
			    u32 imr_val, u32 ier_val)
{
	intel_dmc_wl_get(display, regs.imr);
	intel_dmc_wl_get(display, regs.ier);
	intel_dmc_wl_get(display, regs.iir);

	gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);

	/* release in reverse order of acquisition */
	intel_dmc_wl_put(display, regs.iir);
	intel_dmc_wl_put(display, regs.ier);
	intel_dmc_wl_put(display, regs.imr);
}

/*
 * Reset (disable and clear) an IMR/IER/IIR interrupt register triplet via
 * gen2_irq_reset(), with DMC wakelock references held on all three
 * registers across the MMIO accesses.
 */
static void
intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
{
	intel_dmc_wl_get(display, regs.imr);
	intel_dmc_wl_get(display, regs.ier);
	intel_dmc_wl_get(display, regs.iir);

	gen2_irq_reset(to_intel_uncore(display->drm), regs);

	/* release in reverse order of acquisition */
	intel_dmc_wl_put(display, regs.iir);
	intel_dmc_wl_put(display, regs.ier);
	intel_dmc_wl_put(display, regs.imr);
}

/*
 * Assert that an IIR register reads back zero, holding a DMC wakelock
 * reference across the read.
 *
 * NOTE(review): the function name says "irr" while it checks an IIR
 * register via gen2_assert_iir_is_zero() — looks like a typo for
 * "...assert_iir_is_zero"; not renamed here since out-of-view code may
 * reference it.
 */
static void
intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
{
	intel_dmc_wl_get(display, reg);

	gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);

	intel_dmc_wl_put(display, reg);
}

struct pipe_fault_handler { 73 bool (*handle)(struct intel_crtc *crtc, enum plane_id plane_id); 74 u32 fault; 75 enum plane_id plane_id; 76 }; 77 78 static bool handle_plane_fault(struct intel_crtc *crtc, enum plane_id plane_id) 79 { 80 struct intel_display *display = to_intel_display(crtc); 81 struct intel_plane_error error = {}; 82 struct intel_plane *plane; 83 84 plane = intel_crtc_get_plane(crtc, plane_id); 85 if (!plane || !plane->capture_error) 86 return false; 87 88 plane->capture_error(crtc, plane, &error); 89 90 drm_err_ratelimited(display->drm, 91 "[CRTC:%d:%s][PLANE:%d:%s] fault (CTL=0x%x, SURF=0x%x, SURFLIVE=0x%x)\n", 92 crtc->base.base.id, crtc->base.name, 93 plane->base.base.id, plane->base.name, 94 error.ctl, error.surf, error.surflive); 95 96 return true; 97 } 98 99 static void intel_pipe_fault_irq_handler(struct intel_display *display, 100 const struct pipe_fault_handler *handlers, 101 enum pipe pipe, u32 fault_errors) 102 { 103 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 104 const struct pipe_fault_handler *handler; 105 106 for (handler = handlers; handler && handler->fault; handler++) { 107 if ((fault_errors & handler->fault) == 0) 108 continue; 109 110 if (handler->handle(crtc, handler->plane_id)) 111 fault_errors &= ~handler->fault; 112 } 113 114 WARN_ONCE(fault_errors, "[CRTC:%d:%s] unreported faults 0x%x\n", 115 crtc->base.base.id, crtc->base.name, fault_errors); 116 } 117 118 static void 119 intel_handle_vblank(struct intel_display *display, enum pipe pipe) 120 { 121 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 122 123 drm_crtc_handle_vblank(&crtc->base); 124 } 125 126 /** 127 * ilk_update_display_irq - update DEIMR 128 * @display: display device 129 * @interrupt_mask: mask of interrupt bits to update 130 * @enabled_irq_mask: mask of interrupt bits to enable 131 */ 132 void ilk_update_display_irq(struct intel_display *display, 133 u32 interrupt_mask, u32 enabled_irq_mask) 134 { 135 struct drm_i915_private 
*dev_priv = to_i915(display->drm); 136 u32 new_val; 137 138 lockdep_assert_held(&display->irq.lock); 139 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 140 141 new_val = dev_priv->irq_mask; 142 new_val &= ~interrupt_mask; 143 new_val |= (~enabled_irq_mask & interrupt_mask); 144 145 if (new_val != dev_priv->irq_mask && 146 !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) { 147 dev_priv->irq_mask = new_val; 148 intel_de_write(display, DEIMR, dev_priv->irq_mask); 149 intel_de_posting_read(display, DEIMR); 150 } 151 } 152 153 void ilk_enable_display_irq(struct intel_display *display, u32 bits) 154 { 155 ilk_update_display_irq(display, bits, bits); 156 } 157 158 void ilk_disable_display_irq(struct intel_display *display, u32 bits) 159 { 160 ilk_update_display_irq(display, bits, 0); 161 } 162 163 /** 164 * bdw_update_port_irq - update DE port interrupt 165 * @display: display device 166 * @interrupt_mask: mask of interrupt bits to update 167 * @enabled_irq_mask: mask of interrupt bits to enable 168 */ 169 void bdw_update_port_irq(struct intel_display *display, 170 u32 interrupt_mask, u32 enabled_irq_mask) 171 { 172 struct drm_i915_private *dev_priv = to_i915(display->drm); 173 u32 new_val; 174 u32 old_val; 175 176 lockdep_assert_held(&display->irq.lock); 177 178 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 179 180 if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) 181 return; 182 183 old_val = intel_de_read(display, GEN8_DE_PORT_IMR); 184 185 new_val = old_val; 186 new_val &= ~interrupt_mask; 187 new_val |= (~enabled_irq_mask & interrupt_mask); 188 189 if (new_val != old_val) { 190 intel_de_write(display, GEN8_DE_PORT_IMR, new_val); 191 intel_de_posting_read(display, GEN8_DE_PORT_IMR); 192 } 193 } 194 195 /** 196 * bdw_update_pipe_irq - update DE pipe interrupt 197 * @display: display device 198 * @pipe: pipe whose interrupt to update 199 * @interrupt_mask: mask of interrupt bits to update 200 * @enabled_irq_mask: 
mask of interrupt bits to enable 201 */ 202 static void bdw_update_pipe_irq(struct intel_display *display, 203 enum pipe pipe, u32 interrupt_mask, 204 u32 enabled_irq_mask) 205 { 206 struct drm_i915_private *dev_priv = to_i915(display->drm); 207 u32 new_val; 208 209 lockdep_assert_held(&display->irq.lock); 210 211 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 212 213 if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) 214 return; 215 216 new_val = display->irq.de_irq_mask[pipe]; 217 new_val &= ~interrupt_mask; 218 new_val |= (~enabled_irq_mask & interrupt_mask); 219 220 if (new_val != display->irq.de_irq_mask[pipe]) { 221 display->irq.de_irq_mask[pipe] = new_val; 222 intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]); 223 intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe)); 224 } 225 } 226 227 void bdw_enable_pipe_irq(struct intel_display *display, 228 enum pipe pipe, u32 bits) 229 { 230 bdw_update_pipe_irq(display, pipe, bits, bits); 231 } 232 233 void bdw_disable_pipe_irq(struct intel_display *display, 234 enum pipe pipe, u32 bits) 235 { 236 bdw_update_pipe_irq(display, pipe, bits, 0); 237 } 238 239 /** 240 * ibx_display_interrupt_update - update SDEIMR 241 * @display: display device 242 * @interrupt_mask: mask of interrupt bits to update 243 * @enabled_irq_mask: mask of interrupt bits to enable 244 */ 245 void ibx_display_interrupt_update(struct intel_display *display, 246 u32 interrupt_mask, 247 u32 enabled_irq_mask) 248 { 249 struct drm_i915_private *dev_priv = to_i915(display->drm); 250 u32 sdeimr = intel_de_read(display, SDEIMR); 251 252 sdeimr &= ~interrupt_mask; 253 sdeimr |= (~enabled_irq_mask & interrupt_mask); 254 255 drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); 256 257 lockdep_assert_held(&display->irq.lock); 258 259 if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) 260 return; 261 262 intel_de_write(display, SDEIMR, sdeimr); 263 intel_de_posting_read(display, 
SDEIMR); 264 } 265 266 void ibx_enable_display_interrupt(struct intel_display *display, u32 bits) 267 { 268 ibx_display_interrupt_update(display, bits, bits); 269 } 270 271 void ibx_disable_display_interrupt(struct intel_display *display, u32 bits) 272 { 273 ibx_display_interrupt_update(display, bits, 0); 274 } 275 276 u32 i915_pipestat_enable_mask(struct intel_display *display, 277 enum pipe pipe) 278 { 279 u32 status_mask = display->irq.pipestat_irq_mask[pipe]; 280 u32 enable_mask = status_mask << 16; 281 282 lockdep_assert_held(&display->irq.lock); 283 284 if (DISPLAY_VER(display) < 5) 285 goto out; 286 287 /* 288 * On pipe A we don't support the PSR interrupt yet, 289 * on pipe B and C the same bit MBZ. 290 */ 291 if (drm_WARN_ON_ONCE(display->drm, 292 status_mask & PIPE_A_PSR_STATUS_VLV)) 293 return 0; 294 /* 295 * On pipe B and C we don't support the PSR interrupt yet, on pipe 296 * A the same bit is for perf counters which we don't use either. 297 */ 298 if (drm_WARN_ON_ONCE(display->drm, 299 status_mask & PIPE_B_PSR_STATUS_VLV)) 300 return 0; 301 302 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 303 SPRITE0_FLIP_DONE_INT_EN_VLV | 304 SPRITE1_FLIP_DONE_INT_EN_VLV); 305 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 306 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 307 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 308 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 309 310 out: 311 drm_WARN_ONCE(display->drm, 312 enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 313 status_mask & ~PIPESTAT_INT_STATUS_MASK, 314 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 315 pipe_name(pipe), enable_mask, status_mask); 316 317 return enable_mask; 318 } 319 320 void i915_enable_pipestat(struct intel_display *display, 321 enum pipe pipe, u32 status_mask) 322 { 323 struct drm_i915_private *dev_priv = to_i915(display->drm); 324 i915_reg_t reg = PIPESTAT(display, pipe); 325 u32 enable_mask; 326 327 drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 328 "pipe 
%c: status_mask=0x%x\n", 329 pipe_name(pipe), status_mask); 330 331 lockdep_assert_held(&display->irq.lock); 332 drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)); 333 334 if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) 335 return; 336 337 display->irq.pipestat_irq_mask[pipe] |= status_mask; 338 enable_mask = i915_pipestat_enable_mask(display, pipe); 339 340 intel_de_write(display, reg, enable_mask | status_mask); 341 intel_de_posting_read(display, reg); 342 } 343 344 void i915_disable_pipestat(struct intel_display *display, 345 enum pipe pipe, u32 status_mask) 346 { 347 struct drm_i915_private *dev_priv = to_i915(display->drm); 348 i915_reg_t reg = PIPESTAT(display, pipe); 349 u32 enable_mask; 350 351 drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 352 "pipe %c: status_mask=0x%x\n", 353 pipe_name(pipe), status_mask); 354 355 lockdep_assert_held(&display->irq.lock); 356 drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)); 357 358 if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0) 359 return; 360 361 display->irq.pipestat_irq_mask[pipe] &= ~status_mask; 362 enable_mask = i915_pipestat_enable_mask(display, pipe); 363 364 intel_de_write(display, reg, enable_mask | status_mask); 365 intel_de_posting_read(display, reg); 366 } 367 368 static bool i915_has_legacy_blc_interrupt(struct intel_display *display) 369 { 370 if (display->platform.i85x) 371 return true; 372 373 if (display->platform.pineview) 374 return true; 375 376 return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile; 377 } 378 379 /* enable ASLE pipestat for OpRegion */ 380 static void i915_enable_asle_pipestat(struct intel_display *display) 381 { 382 if (!intel_opregion_asle_present(display)) 383 return; 384 385 if (!i915_has_legacy_blc_interrupt(display)) 386 return; 387 388 spin_lock_irq(&display->irq.lock); 389 390 i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 391 if (DISPLAY_VER(display) >= 4) 392 
i915_enable_pipestat(display, PIPE_A, 393 PIPE_LEGACY_BLC_EVENT_STATUS); 394 395 spin_unlock_irq(&display->irq.lock); 396 } 397 398 #if IS_ENABLED(CONFIG_DEBUG_FS) 399 static void display_pipe_crc_irq_handler(struct intel_display *display, 400 enum pipe pipe, 401 u32 crc0, u32 crc1, 402 u32 crc2, u32 crc3, 403 u32 crc4) 404 { 405 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 406 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; 407 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 408 409 trace_intel_pipe_crc(crtc, crcs); 410 411 spin_lock(&pipe_crc->lock); 412 /* 413 * For some not yet identified reason, the first CRC is 414 * bonkers. So let's just wait for the next vblank and read 415 * out the buggy result. 416 * 417 * On GEN8+ sometimes the second CRC is bonkers as well, so 418 * don't trust that one either. 419 */ 420 if (pipe_crc->skipped <= 0 || 421 (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) { 422 pipe_crc->skipped++; 423 spin_unlock(&pipe_crc->lock); 424 return; 425 } 426 spin_unlock(&pipe_crc->lock); 427 428 drm_crtc_add_crc_entry(&crtc->base, true, 429 drm_crtc_accurate_vblank_count(&crtc->base), 430 crcs); 431 } 432 #else 433 static inline void 434 display_pipe_crc_irq_handler(struct intel_display *display, 435 enum pipe pipe, 436 u32 crc0, u32 crc1, 437 u32 crc2, u32 crc3, 438 u32 crc4) {} 439 #endif 440 441 static void flip_done_handler(struct intel_display *display, 442 enum pipe pipe) 443 { 444 struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); 445 446 spin_lock(&display->drm->event_lock); 447 448 if (crtc->flip_done_event) { 449 trace_intel_crtc_flip_done(crtc); 450 drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event); 451 crtc->flip_done_event = NULL; 452 } 453 454 spin_unlock(&display->drm->event_lock); 455 } 456 457 static void hsw_pipe_crc_irq_handler(struct intel_display *display, 458 enum pipe pipe) 459 { 460 display_pipe_crc_irq_handler(display, pipe, 461 intel_de_read(display, 
PIPE_CRC_RES_HSW(pipe)), 462 0, 0, 0, 0); 463 } 464 465 static void ivb_pipe_crc_irq_handler(struct intel_display *display, 466 enum pipe pipe) 467 { 468 display_pipe_crc_irq_handler(display, pipe, 469 intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)), 470 intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)), 471 intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)), 472 intel_de_read(display, PIPE_CRC_RES_4_IVB(pipe)), 473 intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe))); 474 } 475 476 static void i9xx_pipe_crc_irq_handler(struct intel_display *display, 477 enum pipe pipe) 478 { 479 u32 res1, res2; 480 481 if (DISPLAY_VER(display) >= 3) 482 res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe)); 483 else 484 res1 = 0; 485 486 if (DISPLAY_VER(display) >= 5 || display->platform.g4x) 487 res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe)); 488 else 489 res2 = 0; 490 491 display_pipe_crc_irq_handler(display, pipe, 492 intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)), 493 intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)), 494 intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)), 495 res1, res2); 496 } 497 498 static void i9xx_pipestat_irq_reset(struct intel_display *display) 499 { 500 enum pipe pipe; 501 502 for_each_pipe(display, pipe) { 503 intel_de_write(display, 504 PIPESTAT(display, pipe), 505 PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS); 506 507 display->irq.pipestat_irq_mask[pipe] = 0; 508 } 509 } 510 511 void i9xx_pipestat_irq_ack(struct intel_display *display, 512 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 513 { 514 enum pipe pipe; 515 516 spin_lock(&display->irq.lock); 517 518 if ((display->platform.valleyview || display->platform.cherryview) && 519 !display->irq.vlv_display_irqs_enabled) { 520 spin_unlock(&display->irq.lock); 521 return; 522 } 523 524 for_each_pipe(display, pipe) { 525 i915_reg_t reg; 526 u32 status_mask, enable_mask, iir_bit = 0; 527 528 /* 529 * PIPESTAT bits get signalled even when the 
interrupt is 530 * disabled with the mask bits, and some of the status bits do 531 * not generate interrupts at all (like the underrun bit). Hence 532 * we need to be careful that we only handle what we want to 533 * handle. 534 */ 535 536 /* fifo underruns are filterered in the underrun handler. */ 537 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 538 539 switch (pipe) { 540 default: 541 case PIPE_A: 542 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 543 break; 544 case PIPE_B: 545 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 546 break; 547 case PIPE_C: 548 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 549 break; 550 } 551 if (iir & iir_bit) 552 status_mask |= display->irq.pipestat_irq_mask[pipe]; 553 554 if (!status_mask) 555 continue; 556 557 reg = PIPESTAT(display, pipe); 558 pipe_stats[pipe] = intel_de_read(display, reg) & status_mask; 559 enable_mask = i915_pipestat_enable_mask(display, pipe); 560 561 /* 562 * Clear the PIPE*STAT regs before the IIR 563 * 564 * Toggle the enable bits to make sure we get an 565 * edge in the ISR pipe event bit if we don't clear 566 * all the enabled status bits. Otherwise the edge 567 * triggered IIR on i965/g4x wouldn't notice that 568 * an interrupt is still pending. 
569 */ 570 if (pipe_stats[pipe]) { 571 intel_de_write(display, reg, pipe_stats[pipe]); 572 intel_de_write(display, reg, enable_mask); 573 } 574 } 575 spin_unlock(&display->irq.lock); 576 } 577 578 void i915_pipestat_irq_handler(struct intel_display *display, 579 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 580 { 581 bool blc_event = false; 582 enum pipe pipe; 583 584 for_each_pipe(display, pipe) { 585 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 586 intel_handle_vblank(display, pipe); 587 588 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 589 blc_event = true; 590 591 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 592 i9xx_pipe_crc_irq_handler(display, pipe); 593 594 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 595 intel_cpu_fifo_underrun_irq_handler(display, pipe); 596 } 597 598 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 599 intel_opregion_asle_intr(display); 600 } 601 602 void i965_pipestat_irq_handler(struct intel_display *display, 603 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 604 { 605 bool blc_event = false; 606 enum pipe pipe; 607 608 for_each_pipe(display, pipe) { 609 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 610 intel_handle_vblank(display, pipe); 611 612 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 613 blc_event = true; 614 615 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 616 i9xx_pipe_crc_irq_handler(display, pipe); 617 618 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 619 intel_cpu_fifo_underrun_irq_handler(display, pipe); 620 } 621 622 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 623 intel_opregion_asle_intr(display); 624 625 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 626 intel_gmbus_irq_handler(display); 627 } 628 629 void valleyview_pipestat_irq_handler(struct intel_display *display, 630 u32 pipe_stats[I915_MAX_PIPES]) 631 { 632 enum pipe pipe; 633 634 for_each_pipe(display, pipe) { 635 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 636 intel_handle_vblank(display, 
pipe); 637 638 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 639 flip_done_handler(display, pipe); 640 641 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 642 i9xx_pipe_crc_irq_handler(display, pipe); 643 644 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 645 intel_cpu_fifo_underrun_irq_handler(display, pipe); 646 } 647 648 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 649 intel_gmbus_irq_handler(display); 650 } 651 652 static void ibx_irq_handler(struct intel_display *display, u32 pch_iir) 653 { 654 enum pipe pipe; 655 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 656 657 ibx_hpd_irq_handler(display, hotplug_trigger); 658 659 if (pch_iir & SDE_AUDIO_POWER_MASK) { 660 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 661 SDE_AUDIO_POWER_SHIFT); 662 drm_dbg(display->drm, "PCH audio power change on port %d\n", 663 port_name(port)); 664 } 665 666 if (pch_iir & SDE_AUX_MASK) 667 intel_dp_aux_irq_handler(display); 668 669 if (pch_iir & SDE_GMBUS) 670 intel_gmbus_irq_handler(display); 671 672 if (pch_iir & SDE_AUDIO_HDCP_MASK) 673 drm_dbg(display->drm, "PCH HDCP audio interrupt\n"); 674 675 if (pch_iir & SDE_AUDIO_TRANS_MASK) 676 drm_dbg(display->drm, "PCH transcoder audio interrupt\n"); 677 678 if (pch_iir & SDE_POISON) 679 drm_err(display->drm, "PCH poison interrupt\n"); 680 681 if (pch_iir & SDE_FDI_MASK) { 682 for_each_pipe(display, pipe) 683 drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", 684 pipe_name(pipe), 685 intel_de_read(display, FDI_RX_IIR(pipe))); 686 } 687 688 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 689 drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n"); 690 691 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 692 drm_dbg(display->drm, 693 "PCH transcoder CRC error interrupt\n"); 694 695 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 696 intel_pch_fifo_underrun_irq_handler(display, PIPE_A); 697 698 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 699 intel_pch_fifo_underrun_irq_handler(display, PIPE_B); 700 } 

/* Status bits in GEN7_ERR_INT covering plane faults on the given pipe. */
static u32 ivb_err_int_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return ERR_INT_SPRITE_A_FAULT |
			ERR_INT_PRIMARY_A_FAULT |
			ERR_INT_CURSOR_A_FAULT;
	case PIPE_B:
		return ERR_INT_SPRITE_B_FAULT |
			ERR_INT_PRIMARY_B_FAULT |
			ERR_INT_CURSOR_B_FAULT;
	case PIPE_C:
		return ERR_INT_SPRITE_C_FAULT |
			ERR_INT_PRIMARY_C_FAULT |
			ERR_INT_CURSOR_C_FAULT;
	default:
		return 0;
	}
}

/* GEN7_ERR_INT fault bit -> plane mapping; zeroed sentinel terminates. */
static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
	{ .fault = ERR_INT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = ERR_INT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = ERR_INT_SPRITE_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = ERR_INT_PRIMARY_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = ERR_INT_CURSOR_C_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

/*
 * Handle GEN7_ERR_INT: poison/PTE errors, per-pipe FIFO underruns,
 * CRC-done events and plane faults. Acks the register at the end by
 * writing back the bits that were read.
 */
static void ivb_err_int_handler(struct intel_display *display)
{
	u32 err_int = intel_de_read(display, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(display->drm, "Poison interrupt\n");

	if (err_int & ERR_INT_INVALID_GTT_PTE)
		drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");

	if (err_int & ERR_INT_INVALID_PTE_DATA)
		drm_err_ratelimited(display->drm, "Invalid PTE data\n");

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* IVB and HSW report CRCs via different registers */
			if (display->platform.ivybridge)
				ivb_pipe_crc_irq_handler(display, pipe);
			else
				hsw_pipe_crc_irq_handler(display, pipe);
		}

		fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, ivb_pipe_fault_handlers,
						     pipe, fault_errors);
	}

	/* ack all handled bits */
	intel_de_write(display, GEN7_ERR_INT, err_int);
}

/* Handle SERR_INT (CPT+ PCH): report transcoder FIFO underruns, then ack. */
static void cpt_serr_int_handler(struct intel_display *display)
{
	u32 serr_int = intel_de_read(display, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(display->drm, "PCH poison interrupt\n");

	for_each_pipe(display, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(display, pipe);

	intel_de_write(display, SERR_INT, serr_int);
}

/* CPT/PPT (and later) PCH south display interrupt dispatch. */
static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(display, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(display->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		intel_dp_aux_irq_handler(display);

	if (pch_iir & SDE_GMBUS_CPT)
		intel_gmbus_irq_handler(display);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(display->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(display->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(display, pipe)
			drm_dbg(display->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_de_read(display, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(display);
}

/* Status bits in ILK_GTT_FAULT covering plane faults on the given pipe. */
static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return GTT_FAULT_SPRITE_A_FAULT |
			GTT_FAULT_PRIMARY_A_FAULT |
			GTT_FAULT_CURSOR_A_FAULT;
	case PIPE_B:
		return GTT_FAULT_SPRITE_B_FAULT |
			GTT_FAULT_PRIMARY_B_FAULT |
			GTT_FAULT_CURSOR_B_FAULT;
	default:
		return 0;
	}
}

/* ILK_GTT_FAULT fault bit -> plane mapping; zeroed sentinel terminates. */
static const struct pipe_fault_handler ilk_pipe_fault_handlers[] = {
	{ .fault = GTT_FAULT_SPRITE_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GTT_FAULT_SPRITE_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = GTT_FAULT_PRIMARY_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GTT_FAULT_PRIMARY_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = GTT_FAULT_CURSOR_A_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = GTT_FAULT_CURSOR_B_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

/*
 * Handle ILK_GTT_FAULT: ack it first, report invalid PTE errors, then
 * dispatch any per-pipe plane faults.
 */
static void ilk_gtt_fault_irq_handler(struct intel_display *display)
{
	enum pipe pipe;
	u32 gtt_fault;

	gtt_fault = intel_de_read(display, ILK_GTT_FAULT);
	intel_de_write(display, ILK_GTT_FAULT, gtt_fault);

	if (gtt_fault & GTT_FAULT_INVALID_GTT_PTE)
		drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");

	if (gtt_fault & GTT_FAULT_INVALID_PTE_DATA)
		drm_err_ratelimited(display->drm, "Invalid PTE data\n");

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		fault_errors = gtt_fault & ilk_gtt_fault_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, ilk_pipe_fault_handlers,
						     pipe, fault_errors);
	}
}

/* ILK/SNB display engine (DE) interrupt dispatch for the given DEIIR bits. */
void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(display, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(display);

	if (de_iir & DE_POISON)
		drm_err(display->drm, "Poison interrupt\n");

	if (de_iir & DE_GTT_FAULT)
		ilk_gtt_fault_irq_handler(display);

	for_each_pipe(display, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(display, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(display, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(display, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_de_read(display, SDEIIR);

		if (HAS_PCH_CPT(display))
			cpt_irq_handler(display, pch_iir);
		else
			ibx_irq_handler(display, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		intel_de_write(display, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
		ilk_display_rps_irq_handler(display);
}

/* IVB/HSW display engine (DE) interrupt dispatch for the given DEIIR bits. */
void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(display, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(display);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		struct intel_encoder *encoder;

		/* only one PSR-capable encoder on these platforms: handle
		 * the first one found and stop */
		for_each_intel_encoder_with_psr(display->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
			u32 psr_iir;

			/* rmw with 0/0 reads and clears EDP_PSR_IIR */
			psr_iir = intel_de_rmw(display, EDP_PSR_IIR, 0, 0);
			intel_psr_irq_handler(intel_dp, psr_iir);
			break;
		}
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		intel_dp_aux_irq_handler(display);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(display);

	for_each_pipe(display, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(display, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(display, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_de_read(display, SDEIIR);

		cpt_irq_handler(display, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		intel_de_write(display, SDEIIR, pch_iir);
	}
}

/*
 * AUX-done bits in GEN8_DE_PORT_IIR for this platform.
 * Returns 0 on display ver >= 20, where these bits are not used
 * (presumably AUX interrupts are reported elsewhere — see the callers).
 */
static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
	u32 mask;

	if (DISPLAY_VER(display) >= 20)
		return 0;
	else if (DISPLAY_VER(display) >= 14)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB;
	else if (DISPLAY_VER(display) >= 13)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			XELPD_DE_PORT_AUX_DDID |
			XELPD_DE_PORT_AUX_DDIE |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4;
	else if (DISPLAY_VER(display) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(display) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(display) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

/* Fault bits in GEN8_DE_PIPE_IIR for this platform's plane/DMC layout. */
static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 14)
		return MTL_PIPEDMC_ATS_FAULT |
			MTL_PLANE_ATS_FAULT |
			GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 12)
		return GEN12_PIPEDMC_FAULT |
			GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) == 11)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN11_PIPE_PLANE7_FAULT |
			GEN11_PIPE_PLANE6_FAULT |
			GEN11_PIPE_PLANE5_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_CURSOR_FAULT |
			GEN9_PIPE_PLANE4_FAULT |
			GEN9_PIPE_PLANE3_FAULT |
			GEN9_PIPE_PLANE2_FAULT |
			GEN9_PIPE_PLANE1_FAULT;
	else
		return GEN8_PIPE_CURSOR_FAULT |
			GEN8_PIPE_SPRITE_FAULT |
			GEN8_PIPE_PRIMARY_FAULT;
}

/* Report a plane ATS fault; @plane_id is unused (crtc-level report only). */
static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PLANE ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/* Report a PIPEDMC ATS fault; @plane_id is unused. */
static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC ATS fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/* Report a PIPEDMC fault; @plane_id is unused. */
static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_err_ratelimited(display->drm,
			    "[CRTC:%d:%s] PIPEDMC fault\n",
			    crtc->base.base.id, crtc->base.name);

	return true;
}

/* MTL+ pipe fault bit -> handler mapping; zeroed sentinel terminates. */
static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
	{ .fault = MTL_PLANE_ATS_FAULT, .handle = handle_plane_ats_fault, },
	{ .fault = MTL_PIPEDMC_ATS_FAULT, .handle = handle_pipedmc_ats_fault, },
	{ .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, },
	{ .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

/* TGL pipe fault bit -> handler mapping; zeroed sentinel terminates. */
static const struct pipe_fault_handler tgl_pipe_fault_handlers[] = {
	{ .fault = GEN12_PIPEDMC_FAULT, .handle = handle_pipedmc_fault, },
	{ .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, },
	{ .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, },
	{ .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, },
	{ .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, },
	{ .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, },
	{ .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, },
	{ .fault = GEN9_PIPE_PLANE1_FAULT, .handle
= handle_plane_fault, .plane_id = PLANE_1, }, 1123 { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 1124 {} 1125 }; 1126 1127 static const struct pipe_fault_handler icl_pipe_fault_handlers[] = { 1128 { .fault = GEN11_PIPE_PLANE7_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_7, }, 1129 { .fault = GEN11_PIPE_PLANE6_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_6, }, 1130 { .fault = GEN11_PIPE_PLANE5_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_5, }, 1131 { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, }, 1132 { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, }, 1133 { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, }, 1134 { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, }, 1135 { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 1136 {} 1137 }; 1138 1139 static const struct pipe_fault_handler skl_pipe_fault_handlers[] = { 1140 { .fault = GEN9_PIPE_PLANE4_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_4, }, 1141 { .fault = GEN9_PIPE_PLANE3_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_3, }, 1142 { .fault = GEN9_PIPE_PLANE2_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_2, }, 1143 { .fault = GEN9_PIPE_PLANE1_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_1, }, 1144 { .fault = GEN9_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 1145 {} 1146 }; 1147 1148 static const struct pipe_fault_handler bdw_pipe_fault_handlers[] = { 1149 { .fault = GEN8_PIPE_SPRITE_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, }, 1150 { .fault = GEN8_PIPE_PRIMARY_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, }, 1151 { .fault = GEN8_PIPE_CURSOR_FAULT, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, }, 1152 {} 1153 }; 1154 1155 static 
const struct pipe_fault_handler *
gen8_pipe_fault_handlers(struct intel_display *display)
{
	/* Pick the fault handler table matching this display version. */
	if (DISPLAY_VER(display) >= 14)
		return mtl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 12)
		return tgl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 11)
		return icl_pipe_fault_handlers;
	else if (DISPLAY_VER(display) >= 9)
		return skl_pipe_fault_handlers;
	else
		return bdw_pipe_fault_handlers;
}

/* Wake all waiters blocked on the pmdemand waitqueue. */
static void intel_pmdemand_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->pmdemand.waitqueue);
}

/*
 * Handle DE misc interrupts: DBuf overlap detection, PM demand
 * responses/timeouts (ver 14+), register access timeouts (ver 14+),
 * GSE/opregion events (pre-14) and PSR interrupts. Complains if the
 * IIR contained none of the bits we know about.
 */
static void
gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
	bool found = false;

	if (HAS_DBUF_OVERLAP_DETECTION(display)) {
		if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
			drm_warn(display->drm, "DBuf overlap detected\n");
			found = true;
		}
	}

	if (DISPLAY_VER(display) >= 14) {
		if (iir & (XELPDP_PMDEMAND_RSP |
			   XELPDP_PMDEMAND_RSPTOUT_ERR)) {
			if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
				drm_dbg(display->drm,
					"Error waiting for Punit PM Demand Response\n");

			intel_pmdemand_irq_handler(display);
			found = true;
		}

		if (iir & XELPDP_RM_TIMEOUT) {
			u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
			drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
			found = true;
		}
	} else if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(display);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(display->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			/* ver 12+ has a PSR IIR per transcoder, older has one for eDP */
			if (DISPLAY_VER(display) >= 12)
				iir_reg = TRANS_PSR_IIR(display,
							intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			/* rmw with mask 0 reads the IIR and writes the value back to ack it */
			psr_iir = intel_de_rmw(display, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior GEN12 only have one EDP PSR */
			if (DISPLAY_VER(display) < 12)
				break;
		}
	}

	if (!found)
		drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}

/*
 * Handle a DSI TE (tearing effect) interrupt: figure out which DSI
 * transcoder fired, verify it is in command mode, map it to a pipe and
 * deliver a vblank event for that pipe, then ack the TE in the DSI IIR.
 */
static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val;

	/*
	 * In case of dual link, TE comes from DSI_1
	 * this is to check if dual link is enabled
	 */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
		PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(display->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(display->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(display, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ?
		PORT_B : PORT_A;
	/* rmw with mask 0 reads the IIR and writes the value back to ack it */
	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}

/* Flip-done bit moved from "primary" to "plane 1" naming on ver 9+. */
static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

/*
 * Read and ack SDEIIR, and - when the PICA bit is set (MTL+ PCH) - read
 * and ack PICAINTERRUPT_IIR as well. Returns both raw IIR values to the
 * caller via @pch_iir and @pica_iir (the latter is 0 if no PICA irq).
 */
static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(display, SDEIIR);
	if (!*pch_iir)
		return;

	/*
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);

		pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
		intel_de_write(display, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(display, SDEIIR, *pch_iir);

	if (pica_ier)
		intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}

/*
 * Top-level gen8+ display engine interrupt dispatch. For each category
 * flagged in @master_ctl (MISC, HPD, PORT, per-pipe, PCH) the IIR is
 * read and acked first, then the events are handled.
 */
void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_de_read(display, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_de_write(display, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_de_read(display, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_de_write(display, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(display, iir);
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_de_read(display, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_de_write(display, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(display)) {
				intel_dp_aux_irq_handler(display);
				found = true;
			}

			if (display->platform.geminilake || display->platform.broxton) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			} else if (display->platform.broadwell) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(display, hotplug_trigger);
					found = true;
				}
			}

			if ((display->platform.geminilake || display->platform.broxton) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(display);
				found = true;
			}

			if (DISPLAY_VER(display) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(display, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(display->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(display->drm,
					    "The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(display, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(display))
			flip_done_handler(display, pipe);

		if (HAS_DSB(display)) {
			if (iir & GEN12_DSB_INT(INTEL_DSB_0))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);

			if (iir & GEN12_DSB_INT(INTEL_DSB_1))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);

			if (iir & GEN12_DSB_INT(INTEL_DSB_2))
				intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
		}

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(display, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(display, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(display);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display,
						     gen8_pipe_fault_handlers(display),
						     pipe, fault_errors);
	}

	if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(display, pica_iir);

			if (INTEL_PCH_TYPE(display) >= PCH_ICP)
				icp_irq_handler(display, iir);
			else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
				spt_irq_handler(display, iir);
			else
				cpt_irq_handler(display, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(display->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}

/* Read and ack the GU misc IIR if the master control flags it; returns the raw IIR. */
u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = intel_de_read(display, GEN11_GU_MISC_IIR);
	if (likely(iir))
		intel_de_write(display, GEN11_GU_MISC_IIR, iir);

	return iir;
}

/* Handle a previously acked GU misc IIR value (GSE -> opregion ASLE). */
void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(display);
}

/*
 * gen11+ display interrupt entry point: disable the display interrupt
 * master control, dispatch via gen8_de_irq_handler(), then re-enable.
 */
void gen11_display_irq_handler(struct intel_display *display)
{
	u32 disp_ctl;

	intel_display_rpm_assert_block(display);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
	gen8_de_irq_handler(display, disp_ctl);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	intel_display_rpm_assert_unblock(display);
}

/* Enable the i915gm C-state workaround; counted via irq.vblank_enabled. */
static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
	lockdep_assert_held(&display->drm->vblank_time_lock);

	/*
	 * Vblank/CRC interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank/CRC interrupts are actually enabled.
	 */
	if (display->irq.vblank_enabled++ == 0)
		intel_de_write(display, SCPD0,
			       _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

/* Counterpart of i915gm_irq_cstate_wa_enable(); re-enables render clock gating on last user. */
static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
	lockdep_assert_held(&display->drm->vblank_time_lock);

	if (--display->irq.vblank_enabled == 0)
		intel_de_write(display, SCPD0,
			       _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

/* Locked wrapper to toggle the i915gm C-state workaround. */
void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
	spin_lock_irq(&display->drm->vblank_time_lock);

	if (enable)
		i915gm_irq_cstate_wa_enable(display);
	else
		i915gm_irq_cstate_wa_disable(display);

	spin_unlock_irq(&display->drm->vblank_time_lock);
}

/* Enable vblank interrupts for a pipe via PIPESTAT (gen2/3 style). */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	return 0;
}

/* Disable vblank interrupts for a pipe via PIPESTAT (gen2/3 style). */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

/* i915gm: apply the C-state wake workaround before enabling vblank irqs. */
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	i915gm_irq_cstate_wa_enable(display);

	return i8xx_enable_vblank(crtc);
}

/* i915gm: drop the C-state wake workaround after disabling vblank irqs. */
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	i8xx_disable_vblank(crtc);

	i915gm_irq_cstate_wa_disable(display);
}

/* gen4: vblank via the "start of vblank" PIPESTAT bit. */
int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_enable_pipestat(display, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	return 0;
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	i915_disable_pipestat(display, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

/* ilk/ivb+: vblank via DEIMR; bit layout differs from ver 7 onwards. */
int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(display) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	ilk_enable_display_irq(display, bit);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(display))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(display) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	ilk_disable_display_irq(display, bit);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);
}

/*
 * (Un)mask the DSI TE event for a command-mode DSI crtc and ack any
 * pending TE. Returns false if the crtc does not use TE at all, in
 * which case the caller falls back to regular vblank interrupts.
 */
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct intel_display *display = to_intel_display(intel_crtc);
	enum port port;

	if (!(intel_crtc->mode_flags &
	      (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	intel_de_rmw(display, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, enable ? 0 : DSI_TE_EVENT);

	/* rmw with mask 0 reads the IIR and writes the value back to ack it */
	intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);

	return true;
}

/* Deferred work: tell PSR about the current vblank interrupt user count. */
static void intel_display_vblank_notify_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), irq.vblank_notify_work);
	int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);

	intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
}

/* bdw+: enable vblank irqs; DSI TE crtcs use the TE path instead. */
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, true))
		return 0;

	/* notify PSR (via the work) when the first vblank user appears */
	if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
		schedule_work(&display->irq.vblank_notify_work);

	spin_lock_irqsave(&display->irq.lock, irqflags);
	bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated,
	 * so check only for PSR.
	 */
	if (HAS_PSR(display))
		drm_crtc_vblank_restore(&crtc->base);

	return 0;
}

/* bdw+: disable vblank irqs; counterpart of bdw_enable_vblank(). */
void bdw_disable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, false))
		return;

	spin_lock_irqsave(&display->irq.lock, irqflags);
	bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&display->irq.lock, irqflags);

	/* notify PSR (via the work) when the last vblank user goes away */
	if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
		schedule_work(&display->irq.vblank_notify_work);
}

/* DPINVGTT status bits belonging to the planes/cursor of the given pipe. */
static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
{
	switch (pipe) {
	case PIPE_A:
		return SPRITEB_INVALID_GTT_STATUS |
			SPRITEA_INVALID_GTT_STATUS |
			PLANEA_INVALID_GTT_STATUS |
			CURSORA_INVALID_GTT_STATUS;
	case PIPE_B:
		return SPRITED_INVALID_GTT_STATUS |
			SPRITEC_INVALID_GTT_STATUS |
			PLANEB_INVALID_GTT_STATUS |
			CURSORB_INVALID_GTT_STATUS;
	case PIPE_C:
		return SPRITEF_INVALID_GTT_STATUS |
			SPRITEE_INVALID_GTT_STATUS |
			PLANEC_INVALID_GTT_STATUS |
			CURSORC_INVALID_GTT_STATUS;
	default:
		return 0;
	}
}

/* DPINVGTT fault bit -> plane mapping for vlv/chv invalid GTT accesses. */
static const struct pipe_fault_handler vlv_pipe_fault_handlers[] = {
	{ .fault = SPRITEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORA_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITED_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORB_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{ .fault = SPRITEF_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE1, },
	{ .fault = SPRITEE_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_SPRITE0, },
	{ .fault = PLANEC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_PRIMARY, },
	{ .fault = CURSORC_INVALID_GTT_STATUS, .handle = handle_plane_fault, .plane_id = PLANE_CURSOR, },
	{}
};

/*
 * Ack DPINVGTT page table errors: report the currently enabled+set
 * status bits via @dpinvgtt, then disable those sources permanently
 * (the status bits get stuck once set, see below).
 */
static void vlv_page_table_error_irq_ack(struct intel_display *display, u32 *dpinvgtt)
{
	u32 status, enable, tmp;

	tmp = intel_de_read(display, DPINVGTT);

	/* DPINVGTT: high 16 bits are enables, low 16 bits are status */
	enable = tmp >> 16;
	status = tmp & 0xffff;

	/*
	 * Despite what the docs claim, the status bits seem to get
	 * stuck permanently (similar the old PGTBL_ER register), so
	 * we have to disable and ignore them once set. They do get
	 * reset if the display power well goes down, so no need to
	 * track the enable mask explicitly.
	 */
	*dpinvgtt = status & enable;
	enable &= ~status;

	/* customary ack+disable then re-enable to guarantee an edge */
	intel_de_write(display, DPINVGTT, status);
	intel_de_write(display, DPINVGTT, enable << 16);
}

/* Dispatch previously acked DPINVGTT faults to the per-pipe fault handler. */
static void vlv_page_table_error_irq_handler(struct intel_display *display, u32 dpinvgtt)
{
	enum pipe pipe;

	for_each_pipe(display, pipe) {
		u32 fault_errors;

		fault_errors = dpinvgtt & vlv_dpinvgtt_pipe_fault_mask(pipe);
		if (fault_errors)
			intel_pipe_fault_irq_handler(display, vlv_pipe_fault_handlers,
						     pipe, fault_errors);
	}
}

/* Read and ack VLV_EIR (and DPINVGTT if a page table error is flagged). */
void vlv_display_error_irq_ack(struct intel_display *display,
			       u32 *eir, u32 *dpinvgtt)
{
	u32 emr;

	*eir = intel_de_read(display, VLV_EIR);

	if (*eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_ack(display, dpinvgtt);

	intel_de_write(display, VLV_EIR, *eir);

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits.
	 */
	emr = intel_de_read(display, VLV_EMR);
	intel_de_write(display, VLV_EMR, 0xffffffff);
	intel_de_write(display, VLV_EMR, emr);
}

/* Handle a previously acked master error: currently only page table faults. */
void vlv_display_error_irq_handler(struct intel_display *display,
				   u32 eir, u32 dpinvgtt)
{
	drm_dbg(display->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir & VLV_ERROR_PAGE_TABLE)
		vlv_page_table_error_irq_handler(display, dpinvgtt);
}

/* Reset all vlv/chv display interrupt state; caller holds irq.lock. */
static void _vlv_display_irq_reset(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	if (display->platform.cherryview)
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	gen2_error_reset(to_intel_uncore(display->drm),
			 VLV_ERROR_REGS);

	i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
	/* rmw with mask 0 reads the status and writes it back to ack it */
	intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);

	i9xx_pipestat_irq_reset(display);

	intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

/* Locked wrapper; only resets if vlv display irqs are currently enabled. */
void vlv_display_irq_reset(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	if (display->irq.vlv_display_irqs_enabled)
		_vlv_display_irq_reset(display);
	spin_unlock_irq(&display->irq.lock);
}

/* Reset gen2-4 display interrupt state (hotplug + pipestat). */
void i9xx_display_irq_reset(struct intel_display *display)
{
	if (HAS_HOTPLUG(display)) {
		i915_hotplug_interrupt_update(display, 0xffffffff, 0);
		intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
	}

	i9xx_pipestat_irq_reset(display);
}

/* Enable the always-on pipestat events (CRC done, ASLE) for i915. */
void i915_display_irq_postinstall(struct intel_display *display)
{
	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&display->irq.lock);
	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&display->irq.lock);

	i915_enable_asle_pipestat(display);
}

/* Same as i915 variant, plus the GMBUS event on pipe A. */
void i965_display_irq_postinstall(struct intel_display *display)
{
	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&display->irq.lock);
	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&display->irq.lock);

	i915_enable_asle_pipestat(display);
}

/* Errors unmasked in VLV_EMR. */
static u32 vlv_error_mask(void)
{
	/* TODO enable other errors too? */
	return VLV_ERROR_PAGE_TABLE;
}

/* Program vlv/chv display interrupt enables; caller holds irq.lock. */
static void _vlv_display_irq_postinstall(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	if (display->platform.cherryview)
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_CHV |
			       DPINVGTT_EN_MASK_CHV);
	else
		intel_de_write(display, DPINVGTT,
			       DPINVGTT_STATUS_MASK_VLV |
			       DPINVGTT_EN_MASK_VLV);

	gen2_error_init(to_intel_uncore(display->drm),
			VLV_ERROR_REGS, ~vlv_error_mask());

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(display, pipe)
		i915_enable_pipestat(display, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT;

	if (display->platform.cherryview)
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* a reset must have run first, leaving everything masked */
	drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}

/* Locked wrapper; only installs if vlv display irqs are currently enabled. */
void vlv_display_irq_postinstall(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	if (display->irq.vlv_display_irqs_enabled)
		_vlv_display_irq_postinstall(display);
	spin_unlock_irq(&display->irq.lock);
}

/* Reset south display engine (PCH) interrupts. */
void ibx_display_irq_reset(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (HAS_PCH_NOP(i915))
		return;

	gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);

	if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
		intel_de_write(display, SERR_INT, 0xffffffff);
}

/*
 * Reset all gen8 display interrupts: PSR, per-pipe (only for powered-up
 * pipes), port, misc, and the PCH.
 */
void gen8_display_irq_reset(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	enum pipe pipe;

	if (!HAS_DISPLAY(display))
		return;

	intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
	intel_de_write(display, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(display, pipe)
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
	intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);

	if (HAS_PCH_SPLIT(i915))
		ibx_display_irq_reset(display);
}

/* Reset all gen11+ display interrupts, including per-transcoder PSR on ver 12+. */
void gen11_display_irq_reset(struct intel_display *display)
{
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if
(!HAS_DISPLAY(display)) 2016 return; 2017 2018 intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0); 2019 2020 if (DISPLAY_VER(display) >= 12) { 2021 enum transcoder trans; 2022 2023 for_each_cpu_transcoder_masked(display, trans, trans_mask) { 2024 enum intel_display_power_domain domain; 2025 2026 domain = POWER_DOMAIN_TRANSCODER(trans); 2027 if (!intel_display_power_is_enabled(display, domain)) 2028 continue; 2029 2030 intel_de_write(display, 2031 TRANS_PSR_IMR(display, trans), 2032 0xffffffff); 2033 intel_de_write(display, 2034 TRANS_PSR_IIR(display, trans), 2035 0xffffffff); 2036 } 2037 } else { 2038 intel_de_write(display, EDP_PSR_IMR, 0xffffffff); 2039 intel_de_write(display, EDP_PSR_IIR, 0xffffffff); 2040 } 2041 2042 for_each_pipe(display, pipe) 2043 if (intel_display_power_is_enabled(display, 2044 POWER_DOMAIN_PIPE(pipe))) 2045 intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe)); 2046 2047 intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS); 2048 intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS); 2049 2050 if (DISPLAY_VER(display) >= 14) 2051 intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS); 2052 else 2053 intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS); 2054 2055 if (INTEL_PCH_TYPE(display) >= PCH_ICP) 2056 intel_display_irq_regs_reset(display, SDE_IRQ_REGS); 2057 } 2058 2059 void gen8_irq_power_well_post_enable(struct intel_display *display, 2060 u8 pipe_mask) 2061 { 2062 struct drm_i915_private *dev_priv = to_i915(display->drm); 2063 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | 2064 gen8_de_pipe_flip_done_mask(display); 2065 enum pipe pipe; 2066 2067 spin_lock_irq(&display->irq.lock); 2068 2069 if (!intel_irqs_enabled(dev_priv)) { 2070 spin_unlock_irq(&display->irq.lock); 2071 return; 2072 } 2073 2074 for_each_pipe_masked(display, pipe, pipe_mask) 2075 intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), 2076 display->irq.de_irq_mask[pipe], 2077 
					    ~display->irq.de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&display->irq.lock);
}

/*
 * Mask and clear the per-pipe display engine interrupts before a pipe
 * power well is turned off, then make sure no display IRQ is still in
 * flight before the power actually goes away.
 */
void gen8_irq_power_well_pre_disable(struct intel_display *display,
				     u8 pipe_mask)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	enum pipe pipe;

	spin_lock_irq(&display->irq.lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&display->irq.lock);
		return;
	}

	for_each_pipe_masked(display, pipe, pipe_mask)
		intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));

	spin_unlock_irq(&display->irq.lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct intel_display *display)
{
	u32 mask;

	if (HAS_PCH_NOP(display))
		return;

	if (HAS_PCH_IBX(display))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	/* IER all ones (see comment above); actual routing via SDEIMR. */
	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}

/*
 * Turn VLV/CHV display interrupts on. If the interrupt handler is
 * already installed, the display IRQ registers are reset and
 * (re)initialized immediately; otherwise this is deferred to
 * vlv_display_irq_postinstall().
 */
void valleyview_enable_display_irqs(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	spin_lock_irq(&display->irq.lock);

	if (display->irq.vlv_display_irqs_enabled)
		goto out;

	display->irq.vlv_display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		_vlv_display_irq_reset(display);
		_vlv_display_irq_postinstall(display);
	}

out:
	spin_unlock_irq(&display->irq.lock);
}

/*
 * Turn VLV/CHV display interrupts off, resetting the display IRQ
 * registers if the interrupt handler is still installed.
 */
void valleyview_disable_display_irqs(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	spin_lock_irq(&display->irq.lock);

	if (!display->irq.vlv_display_irqs_enabled)
		goto out;

	display->irq.vlv_display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		_vlv_display_irq_reset(display);
out:
	spin_unlock_irq(&display->irq.lock);
}

/*
 * Install the ILK/SNB/IVB/HSW display engine interrupts. Sources in
 * display_mask are unmasked in IMR; sources in extra_mask are enabled
 * in IER only, i.e. left masked until a user unmasks them on demand
 * (vblank, errors, flip-done, hotplug).
 */
void ilk_de_irq_postinstall(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	u32 display_mask, extra_mask;

	if (DISPLAY_VER(display) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE |
				DE_PCH_EVENT | DE_GTT_FAULT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (display->platform.haswell) {
		/* PSR IIR must already be clear before unmasking PSR. */
		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (display->platform.ironlake && display->platform.mobile)
		extra_mask |= DE_PCU_EVENT;

	i915->irq_mask = ~display_mask;

	/* Set up the south (PCH) interrupts before the north side. */
	ibx_irq_postinstall(display);

	intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
				    display_mask | extra_mask);
}

static void mtp_irq_postinstall(struct intel_display *display);
static void icp_irq_postinstall(struct intel_display *display);

/*
 * Install the gen8+ display engine interrupts: south/PICA interrupts
 * first, then the per-pipe, port, misc and (display ver 11-13) HPD
 * registers. Pipes whose power well is down are skipped here and set up
 * later by gen8_irq_power_well_post_enable().
 */
void gen8_de_irq_postinstall(struct intel_display *display)
{
	u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(display);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(display))
		return;

	if (DISPLAY_VER(display) >= 14)
		mtp_irq_postinstall(display);
	else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
		icp_irq_postinstall(display);
	else if (HAS_PCH_SPLIT(display))
		ibx_irq_postinstall(display);

	if (DISPLAY_VER(display) < 11)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (display->platform.geminilake || display->platform.broxton)
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(display) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
			XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
	} else if (DISPLAY_VER(display) >= 11) {
		enum port port;

		/* DSI TE interrupts are only needed when a DSI panel exists. */
		if (intel_bios_is_dsi_present(display, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	if (HAS_DBUF_OVERLAP_DETECTION(display))
		de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;

	if (HAS_DSB(display))
		de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
			GEN12_DSB_INT(INTEL_DSB_1) |
			GEN12_DSB_INT(INTEL_DSB_2);

	/*
	 * vblank/underrun/flip-done are enabled in IER but remain masked
	 * in IMR (de_irq_mask[] below) until explicitly unmasked.
	 */
	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
		gen8_de_pipe_flip_done_mask(display);

	de_port_enables = de_port_masked;
	if (display->platform.geminilake || display->platform.broxton)
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (display->platform.broadwell)
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(display) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(display, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(display, domain))
				continue;

			/* PSR IIR should be clean at this point. */
			intel_display_irq_regs_assert_irr_is_zero(display,
								  TRANS_PSR_IIR(display, trans));
		}
	} else {
		intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
	}

	for_each_pipe(display, pipe) {
		display->irq.de_irq_mask[pipe] = ~de_pipe_masked;

		/* Powered-down pipes are handled at power well enable time. */
		if (intel_display_power_is_enabled(display,
						   POWER_DOMAIN_PIPE(pipe)))
			intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
						    display->irq.de_irq_mask[pipe],
						    de_pipe_enables);
	}

	intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
				    de_port_enables);
	intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
				    de_misc_masked);

	if (IS_DISPLAY_VER(display,
			   11, 13)) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
			GEN11_DE_TBT_HOTPLUG_MASK;

		intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
					    de_hpd_enables);
	}
}

/*
 * Display ver 14+ (MTP): hotplug is routed through the PICA unit;
 * enable GMBUS and the PICA pass-through interrupt on the south side.
 */
static void mtp_irq_postinstall(struct intel_display *display)
{
	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
		XELPDP_TBT_HOTPLUG_MASK;

	intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
				    de_hpd_enables);

	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}

/* ICP+ PCH: only GMBUS unmasked by default, IER fully enabled. */
static void icp_irq_postinstall(struct intel_display *display)
{
	u32 mask = SDE_GMBUS_ICP;

	intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}

/* Install gen11 display interrupts, then flip the display master enable. */
void gen11_de_irq_postinstall(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	gen8_de_irq_postinstall(display);

	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}

/* DG1 variant; same sequence as gen11_de_irq_postinstall(). */
void dg1_de_irq_postinstall(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	gen8_de_irq_postinstall(display);
	intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}

/* One-time software init of display IRQ state; no hardware access here. */
void intel_display_irq_init(struct intel_display *display)
{
	spin_lock_init(&display->irq.lock);

	/*
	 * Allow drm_vblank to disable vblank interrupts immediately when
	 * the last reference is dropped.
	 */
	display->drm->vblank_disable_immediate = true;

	intel_hotplug_irq_init(display);

	INIT_WORK(&display->irq.vblank_notify_work,
		  intel_display_vblank_notify_work);
}

/*
 * Display IRQ register state captured for later printing
 * (NOTE(review): presumably for error-state dumps — confirm callers).
 */
struct intel_display_irq_snapshot {
	u32 derrmr;
};

/*
 * Capture display IRQ register state. Uses GFP_ATOMIC so it is safe to
 * call from atomic context. Returns NULL on allocation failure.
 */
struct intel_display_irq_snapshot *
intel_display_irq_snapshot_capture(struct intel_display *display)
{
	struct intel_display_irq_snapshot *snapshot;
	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	/* DERRMR is only read on display ver 6..19 non-GMCH platforms. */
	if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
		snapshot->derrmr = intel_de_read(display, DERRMR);

	return snapshot;
}

/* Print a previously captured snapshot; a NULL snapshot is a no-op. */
void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot,
				      struct drm_printer *p)
{
	if (!snapshot)
		return;

	drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
}