Lines matching the free-text query "+full:0 +full:xe". The numbered fragments below appear to come from the interrupt-handling code of the Linux kernel's Intel Xe GPU driver (drivers/gpu/drm/xe/xe_irq.c): each entry shows the file line number, the matching code, and the enclosing function, with trailing tags such as "argument" or "local" apparently describing how the matched identifier is used on that line.
30 #define IMR(offset) XE_REG(offset + 0x4)
31 #define IIR(offset) XE_REG(offset + 0x8)
32 #define IER(offset) XE_REG(offset + 0xc)
34 static int xe_irq_msix_init(struct xe_device *xe);
35 static void xe_irq_msix_free(struct xe_device *xe);
36 static int xe_irq_msix_request_irqs(struct xe_device *xe);
37 static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
43 if (val == 0) in assert_iir_is_zero()
46 drm_WARN(&mmio->tile->xe->drm, 1, in assert_iir_is_zero()
47 "Interrupt register 0x%x is not zero: 0x%08x\n", in assert_iir_is_zero()
49 xe_mmio_write32(mmio, reg, 0xffffffff); in assert_iir_is_zero()
51 xe_mmio_write32(mmio, reg, 0xffffffff); in assert_iir_is_zero()
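Assembled from the assert_iir_is_zero() fragments above, a hedged reconstruction of the helper: it warns if an interrupt identity register is non-zero before the driver enables interrupts, then clears it. The function signature, the reg.addr argument, and the posting reads are assumptions not visible in the listing.

static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&mmio->tile->xe->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);

	/* Clear the stale bits: write-1-to-clear, twice, with posting reads. */
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}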
81 xe_mmio_write32(mmio, IMR(irqregs), ~0); in mask_and_disable()
85 xe_mmio_write32(mmio, IER(irqregs), 0); in mask_and_disable()
88 xe_mmio_write32(mmio, IIR(irqregs), ~0); in mask_and_disable()
90 xe_mmio_write32(mmio, IIR(irqregs), ~0); in mask_and_disable()
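The mask_and_disable() fragments above follow the i915-style quiesce sequence for one IMR/IIR/IER register bank. A sketch of how they likely fit together; the posting reads and the &tile->mmio accessor are assumptions, not shown in the listing.

static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_mmio *mmio = &tile->mmio;	/* assumed accessor */

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	xe_mmio_read32(mmio, IMR(irqregs));	/* posting read */

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can queue up to two events, so clear it twice. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}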
94 static u32 xelp_intr_disable(struct xe_device *xe) in xelp_intr_disable() argument
96 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in xelp_intr_disable()
98 xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0); in xelp_intr_disable()
110 gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl) in gu_misc_irq_ack() argument
112 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in gu_misc_irq_ack()
116 return 0; in gu_misc_irq_ack()
125 static inline void xelp_intr_enable(struct xe_device *xe, bool stall) in xelp_intr_enable() argument
127 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in xelp_intr_enable()
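A hedged sketch of the master-interrupt toggle pair implied by the xelp_intr_disable()/xelp_intr_enable() fragments: disabling writes 0 to GFX_MSTR_IRQ and returns the latched master-control bits, enabling sets the master bit again and optionally does a posting read. The MASTER_IRQ bit name and the return read are assumptions taken from the driver's register definitions rather than from this listing.

static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * Sample the level indications while the master is disabled;
	 * they are cleared by the individual acks that follow.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);	/* assumed bit name */
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);		/* posting read */
}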
137 struct xe_device *xe = gt_to_xe(gt); in xe_irq_enable_hwe() local
141 u32 gsc_mask = 0; in xe_irq_enable_hwe()
142 u32 heci_mask = 0; in xe_irq_enable_hwe()
144 if (xe_device_uses_memirq(xe)) in xe_irq_enable_hwe()
147 if (xe_device_uc_enabled(xe)) { in xe_irq_enable_hwe()
180 if (ccs_mask & (BIT(0)|BIT(1))) in xe_irq_enable_hwe()
186 if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { in xe_irq_enable_hwe()
202 } else if (xe->info.has_heci_gscfi) { in xe_irq_enable_hwe()
213 if (xe_pxp_is_supported(xe)) { in xe_irq_enable_hwe()
225 gt_engine_identity(struct xe_device *xe, in gt_engine_identity() argument
233 lockdep_assert_held(&xe->irq.lock); in gt_engine_identity()
248 drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", in gt_engine_identity()
250 return 0; in gt_engine_identity()
272 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", in gt_other_irq_handler()
281 struct xe_device *xe = tile_to_xe(tile); in pick_engine_gt() local
283 if (MEDIA_VER(xe) < 13) in pick_engine_gt()
309 struct xe_device *xe = tile_to_xe(tile); in gt_irq_handler() local
316 spin_lock(&xe->irq.lock); in gt_irq_handler()
318 for (bank = 0; bank < 2; bank++) { in gt_irq_handler()
324 identity[bit] = gt_engine_identity(xe, mmio, bank, bit); in gt_irq_handler()
348 if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE) in gt_irq_handler()
349 xe_heci_gsc_irq_handler(xe, intr_vec); in gt_irq_handler()
351 xe_pxp_irq_handler(xe, intr_vec); in gt_irq_handler()
358 spin_unlock(&xe->irq.lock); in gt_irq_handler()
367 struct xe_device *xe = arg; in xelp_irq_handler() local
368 struct xe_tile *tile = xe_device_get_root_tile(xe); in xelp_irq_handler()
373 if (!atomic_read(&xe->irq.enabled)) in xelp_irq_handler()
376 master_ctl = xelp_intr_disable(xe); in xelp_irq_handler()
378 xelp_intr_enable(xe, false); in xelp_irq_handler()
384 xe_display_irq_handler(xe, master_ctl); in xelp_irq_handler()
386 gu_misc_iir = gu_misc_irq_ack(xe, master_ctl); in xelp_irq_handler()
388 xelp_intr_enable(xe, false); in xelp_irq_handler()
390 xe_display_irq_enable(xe, gu_misc_iir); in xelp_irq_handler()
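The xelp_irq_handler() fragments outline the pre-DG1 (single-tile) top-level MSI handler: bail out if interrupts are disabled, latch and clear the master control, dispatch GT and display interrupts, ack the GU-misc source, then re-enable the master. A sketch, with the early-exit paths, the gt_irq_handler() call, and the IRQ_HANDLED/IRQ_NONE returns filled in as assumptions.

static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	if (!atomic_read(&xe->irq.enabled))
		return IRQ_NONE;

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);
	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}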
395 static u32 dg1_intr_disable(struct xe_device *xe) in dg1_intr_disable() argument
397 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in dg1_intr_disable()
401 xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0); in dg1_intr_disable()
406 return 0; in dg1_intr_disable()
413 static void dg1_intr_enable(struct xe_device *xe, bool stall) in dg1_intr_enable() argument
415 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in dg1_intr_enable()
429 struct xe_device *xe = arg; in dg1_irq_handler() local
431 u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0; in dg1_irq_handler()
438 if (!atomic_read(&xe->irq.enabled)) in dg1_irq_handler()
441 master_tile_ctl = dg1_intr_disable(xe); in dg1_irq_handler()
443 dg1_intr_enable(xe, false); in dg1_irq_handler()
447 for_each_tile(tile, xe, id) { in dg1_irq_handler()
450 if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0) in dg1_irq_handler()
460 if (master_ctl == REG_GENMASK(31, 0)) { in dg1_irq_handler()
475 if (id == 0) { in dg1_irq_handler()
476 if (xe->info.has_heci_cscfi) in dg1_irq_handler()
477 xe_heci_csc_irq_handler(xe, master_ctl); in dg1_irq_handler()
478 xe_display_irq_handler(xe, master_ctl); in dg1_irq_handler()
479 gu_misc_iir = gu_misc_irq_ack(xe, master_ctl); in dg1_irq_handler()
483 dg1_intr_enable(xe, false); in dg1_irq_handler()
484 xe_display_irq_enable(xe, gu_misc_iir); in dg1_irq_handler()
499 xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0); in gt_irq_reset()
500 xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0); in gt_irq_reset()
502 xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0); in gt_irq_reset()
505 xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~0); in gt_irq_reset()
506 xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~0); in gt_irq_reset()
508 xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0); in gt_irq_reset()
510 xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0); in gt_irq_reset()
512 xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0); in gt_irq_reset()
514 xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0); in gt_irq_reset()
515 xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~0); in gt_irq_reset()
516 xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~0); in gt_irq_reset()
517 xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~0); in gt_irq_reset()
518 if (ccs_mask & (BIT(0)|BIT(1))) in gt_irq_reset()
519 xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0); in gt_irq_reset()
521 xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); in gt_irq_reset()
526 xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0); in gt_irq_reset()
527 xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0); in gt_irq_reset()
528 xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0); in gt_irq_reset()
529 xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0); in gt_irq_reset()
530 xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0); in gt_irq_reset()
533 xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0); in gt_irq_reset()
534 xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK, ~0); in gt_irq_reset()
535 xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE, 0); in gt_irq_reset()
536 xe_mmio_write32(mmio, GUC_SG_INTR_MASK, ~0); in gt_irq_reset()
553 if (tile->id == 0) in dg1_irq_reset()
568 xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0); in dg1_irq_reset_mstr()
571 static void vf_irq_reset(struct xe_device *xe) in vf_irq_reset() argument
576 xe_assert(xe, IS_SRIOV_VF(xe)); in vf_irq_reset()
578 if (GRAPHICS_VERx100(xe) < 1210) in vf_irq_reset()
579 xelp_intr_disable(xe); in vf_irq_reset()
581 xe_assert(xe, xe_device_has_memirq(xe)); in vf_irq_reset()
583 for_each_tile(tile, xe, id) { in vf_irq_reset()
584 if (xe_device_has_memirq(xe)) in vf_irq_reset()
591 static void xe_irq_reset(struct xe_device *xe) in xe_irq_reset() argument
596 if (IS_SRIOV_VF(xe)) in xe_irq_reset()
597 return vf_irq_reset(xe); in xe_irq_reset()
599 if (xe_device_uses_memirq(xe)) { in xe_irq_reset()
600 for_each_tile(tile, xe, id) in xe_irq_reset()
604 for_each_tile(tile, xe, id) { in xe_irq_reset()
605 if (GRAPHICS_VERx100(xe) >= 1210) in xe_irq_reset()
611 tile = xe_device_get_root_tile(xe); in xe_irq_reset()
613 xe_display_irq_reset(xe); in xe_irq_reset()
620 if (GRAPHICS_VERx100(xe) >= 1210) { in xe_irq_reset()
621 for_each_tile(tile, xe, id) in xe_irq_reset()
626 static void vf_irq_postinstall(struct xe_device *xe) in vf_irq_postinstall() argument
631 for_each_tile(tile, xe, id) in vf_irq_postinstall()
632 if (xe_device_has_memirq(xe)) in vf_irq_postinstall()
635 if (GRAPHICS_VERx100(xe) < 1210) in vf_irq_postinstall()
636 xelp_intr_enable(xe, true); in vf_irq_postinstall()
638 xe_assert(xe, xe_device_has_memirq(xe)); in vf_irq_postinstall()
641 static void xe_irq_postinstall(struct xe_device *xe) in xe_irq_postinstall() argument
643 if (IS_SRIOV_VF(xe)) in xe_irq_postinstall()
644 return vf_irq_postinstall(xe); in xe_irq_postinstall()
646 if (xe_device_uses_memirq(xe)) { in xe_irq_postinstall()
650 for_each_tile(tile, xe, id) in xe_irq_postinstall()
654 xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe)); in xe_irq_postinstall()
660 unmask_and_enable(xe_device_get_root_tile(xe), in xe_irq_postinstall()
664 if (GRAPHICS_VERx100(xe) >= 1210) in xe_irq_postinstall()
665 dg1_intr_enable(xe, true); in xe_irq_postinstall()
667 xelp_intr_enable(xe, true); in xe_irq_postinstall()
672 struct xe_device *xe = arg; in vf_mem_irq_handler() local
676 if (!atomic_read(&xe->irq.enabled)) in vf_mem_irq_handler()
679 for_each_tile(tile, xe, id) in vf_mem_irq_handler()
685 static irq_handler_t xe_irq_handler(struct xe_device *xe) in xe_irq_handler() argument
687 if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) in xe_irq_handler()
690 if (GRAPHICS_VERx100(xe) >= 1210) in xe_irq_handler()
696 static int xe_irq_msi_request_irqs(struct xe_device *xe) in xe_irq_msi_request_irqs() argument
698 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_irq_msi_request_irqs()
702 irq_handler = xe_irq_handler(xe); in xe_irq_msi_request_irqs()
704 drm_err(&xe->drm, "No supported interrupt handler"); in xe_irq_msi_request_irqs()
708 irq = pci_irq_vector(pdev, 0); in xe_irq_msi_request_irqs()
709 err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe); in xe_irq_msi_request_irqs()
710 if (err < 0) { in xe_irq_msi_request_irqs()
711 drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err); in xe_irq_msi_request_irqs()
715 return 0; in xe_irq_msi_request_irqs()
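The MSI path above picks a platform handler with xe_irq_handler() and binds it to PCI vector 0. A hedged reconstruction of xe_irq_msi_request_irqs(); the -EINVAL return for a missing handler is an assumption.

static int xe_irq_msi_request_irqs(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	irq_handler_t irq_handler;
	int irq, err;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;	/* assumed error code */
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
		return err;
	}

	return 0;
}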
718 static void xe_irq_msi_free(struct xe_device *xe) in xe_irq_msi_free() argument
720 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_irq_msi_free()
723 irq = pci_irq_vector(pdev, 0); in xe_irq_msi_free()
724 free_irq(irq, xe); in xe_irq_msi_free()
729 struct xe_device *xe = arg; in irq_uninstall() local
731 if (!atomic_xchg(&xe->irq.enabled, 0)) in irq_uninstall()
734 xe_irq_reset(xe); in irq_uninstall()
736 if (xe_device_has_msix(xe)) in irq_uninstall()
737 xe_irq_msix_free(xe); in irq_uninstall()
739 xe_irq_msi_free(xe); in irq_uninstall()
742 int xe_irq_init(struct xe_device *xe) in xe_irq_init() argument
744 spin_lock_init(&xe->irq.lock); in xe_irq_init()
746 return xe_irq_msix_init(xe); in xe_irq_init()
749 int xe_irq_install(struct xe_device *xe) in xe_irq_install() argument
751 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_irq_install()
756 xe_irq_reset(xe); in xe_irq_install()
758 if (xe_device_has_msix(xe)) { in xe_irq_install()
759 nvec = xe->irq.msix.nvec; in xe_irq_install()
764 if (err < 0) { in xe_irq_install()
765 drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err); in xe_irq_install()
769 err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) : in xe_irq_install()
770 xe_irq_msi_request_irqs(xe); in xe_irq_install()
774 atomic_set(&xe->irq.enabled, 1); in xe_irq_install()
776 xe_irq_postinstall(xe); in xe_irq_install()
778 return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe); in xe_irq_install()
781 static void xe_irq_msi_synchronize_irq(struct xe_device *xe) in xe_irq_msi_synchronize_irq() argument
783 synchronize_irq(to_pci_dev(xe->drm.dev)->irq); in xe_irq_msi_synchronize_irq()
786 void xe_irq_suspend(struct xe_device *xe) in xe_irq_suspend() argument
788 atomic_set(&xe->irq.enabled, 0); /* no new irqs */ in xe_irq_suspend()
791 if (xe_device_has_msix(xe)) in xe_irq_suspend()
792 xe_irq_msix_synchronize_irq(xe); in xe_irq_suspend()
794 xe_irq_msi_synchronize_irq(xe); in xe_irq_suspend()
795 xe_irq_reset(xe); /* turn irqs off */ in xe_irq_suspend()
798 void xe_irq_resume(struct xe_device *xe) in xe_irq_resume() argument
808 atomic_set(&xe->irq.enabled, 1); in xe_irq_resume()
809 xe_irq_reset(xe); in xe_irq_resume()
810 xe_irq_postinstall(xe); /* turn irqs on */ in xe_irq_resume()
812 for_each_gt(gt, xe, id) in xe_irq_resume()
819 GUC2HOST_MSIX = 0,
825 static int xe_irq_msix_init(struct xe_device *xe) in xe_irq_msix_init() argument
827 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_irq_msix_init()
831 return 0; /* MSI */ in xe_irq_msix_init()
833 if (nvec < 0) { in xe_irq_msix_init()
834 drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec); in xe_irq_msix_init()
838 xe->irq.msix.nvec = nvec; in xe_irq_msix_init()
839 xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC); in xe_irq_msix_init()
840 return 0; in xe_irq_msix_init()
845 struct xe_device *xe = arg; in guc2host_irq_handler() local
849 if (!atomic_read(&xe->irq.enabled)) in guc2host_irq_handler()
852 for_each_tile(tile, xe, id) in guc2host_irq_handler()
862 struct xe_device *xe = arg; in xe_irq_msix_default_hwe_handler() local
869 if (!atomic_read(&xe->irq.enabled)) in xe_irq_msix_default_hwe_handler()
872 for_each_tile(tile, xe, tile_id) { in xe_irq_msix_default_hwe_handler()
877 for_each_gt(gt, xe, gt_id) { in xe_irq_msix_default_hwe_handler()
889 static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf, in xe_irq_msix_alloc_vector() argument
896 limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) : in xe_irq_msix_alloc_vector()
898 ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL); in xe_irq_msix_alloc_vector()
905 return 0; in xe_irq_msix_alloc_vector()
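The alloc-vector fragments show the static vs. dynamic MSI-X split: static vectors (such as GUC2HOST_MSIX) pin the xarray index to the requested value, while dynamic ones may land anywhere between NUM_OF_STATIC_MSIX and nvec - 1. A sketch; the write-back of the allocated id in the dynamic case is an assumption.

static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
				    bool dynamic_msix, u16 *msix)
{
	struct xa_limit limit;
	u32 id;
	int ret;

	limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
				 XA_LIMIT(*msix, *msix);	/* static: exact slot */
	ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
	if (ret)
		return ret;

	if (dynamic_msix)
		*msix = id;	/* report the dynamically chosen vector */

	return 0;
}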
908 static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix) in xe_irq_msix_release_vector() argument
910 xa_erase(&xe->irq.msix.indexes, msix); in xe_irq_msix_release_vector()
913 static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler, in xe_irq_msix_request_irq_internal() argument
916 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_irq_msix_request_irq_internal()
920 if (irq < 0) in xe_irq_msix_request_irq_internal()
924 if (ret < 0) in xe_irq_msix_request_irq_internal()
927 return 0; in xe_irq_msix_request_irq_internal()
930 int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf, in xe_irq_msix_request_irq() argument
935 ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix); in xe_irq_msix_request_irq()
939 ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix); in xe_irq_msix_request_irq()
941 drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix); in xe_irq_msix_request_irq()
942 xe_irq_msix_release_vector(xe, *msix); in xe_irq_msix_request_irq()
946 return 0; in xe_irq_msix_request_irq()
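Based on the request-irq fragments, a hypothetical caller asking for a dynamically allocated MSI-X vector would look roughly like this. my_handler, my_ctx, and the name string are placeholders, and the order of the last three parameters is inferred from the fragments rather than confirmed by them.

	u16 msix;
	int err;

	err = xe_irq_msix_request_irq(xe, my_handler, my_ctx,
				      "xe-my-engine", true, &msix);
	if (err)
		return err;

	/* ... later, on teardown ... */
	xe_irq_msix_free_irq(xe, msix);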
949 void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix) in xe_irq_msix_free_irq() argument
951 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_irq_msix_free_irq()
955 irq_buf = xa_load(&xe->irq.msix.indexes, msix); in xe_irq_msix_free_irq()
960 if (irq < 0) { in xe_irq_msix_free_irq()
961 drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix); in xe_irq_msix_free_irq()
966 xe_irq_msix_release_vector(xe, msix); in xe_irq_msix_free_irq()
969 int xe_irq_msix_request_irqs(struct xe_device *xe) in xe_irq_msix_request_irqs() argument
975 err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe, in xe_irq_msix_request_irqs()
981 err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe, in xe_irq_msix_request_irqs()
984 xe_irq_msix_free_irq(xe, GUC2HOST_MSIX); in xe_irq_msix_request_irqs()
988 return 0; in xe_irq_msix_request_irqs()
991 void xe_irq_msix_free(struct xe_device *xe) in xe_irq_msix_free() argument
996 xa_for_each(&xe->irq.msix.indexes, msix, dummy) in xe_irq_msix_free()
997 xe_irq_msix_free_irq(xe, msix); in xe_irq_msix_free()
998 xa_destroy(&xe->irq.msix.indexes); in xe_irq_msix_free()
1001 void xe_irq_msix_synchronize_irq(struct xe_device *xe) in xe_irq_msix_synchronize_irq() argument
1003 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_irq_msix_synchronize_irq()
1007 xa_for_each(&xe->irq.msix.indexes, msix, dummy) in xe_irq_msix_synchronize_irq()
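Finally, the synchronize fragments iterate every registered MSI-X index in the xarray and synchronize each corresponding Linux IRQ. A sketch with the loop body filled in as an assumption.

void xe_irq_msix_synchronize_irq(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned long msix;
	void *dummy;

	xa_for_each(&xe->irq.msix.indexes, msix, dummy)
		synchronize_irq(pci_irq_vector(pdev, msix));
}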