Lines Matching +full:0 +full:xe

71 struct xe_device *xe = to_xe_device(dev); in xe_file_open() local
89 xef->xe = xe; in xe_file_open()
107 return 0; in xe_file_open()
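
These open-hook fragments fit the usual DRM pattern: allocate a per-client struct, point it back at the device, and stash it in the file's driver_priv. A minimal sketch of that shape, assuming the elided lines do nothing more exotic (the refcount field and error value are assumptions, not shown in the excerpt):

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef;

        xef = kzalloc(sizeof(*xef), GFP_KERNEL);        /* assumed allocation */
        if (!xef)
                return -ENOMEM;

        xef->xe = xe;                   /* back-pointer used throughout the ioctls */
        kref_init(&xef->refcount);      /* assumed; paired with xe_file_get()/put() */

        file->driver_priv = xef;
        return 0;
}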
125 * xe_file_get() - Take a reference to the xe file object
126 * @xef: Pointer to the xe file
128 * Anyone with a pointer to xef must take a reference to the xe file
131 * Return: xe file pointer
140 * xe_file_put() - Drop a reference to the xe file object
141 * @xef: Pointer to the xe file
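
The kernel-doc stubs for xe_file_get() and xe_file_put() describe a plain kref scheme: anyone holding a raw xef pointer must pin it before use. The bodies are almost certainly one-liners along these lines (xe_file_destroy as the release callback is an assumption based on the naming convention):

struct xe_file *xe_file_get(struct xe_file *xef)
{
        kref_get(&xef->refcount);
        return xef;
}

void xe_file_put(struct xe_file *xef)
{
        /* frees the object via the release callback on the last put */
        kref_put(&xef->refcount, xe_file_destroy);
}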
152 struct xe_device *xe = to_xe_device(dev); in xe_file_close() local
158 xe_pm_runtime_get(xe); in xe_file_close()
177 xe_pm_runtime_put(xe); in xe_file_close()
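
xe_file_close() brackets its teardown in a runtime-PM reference, since releasing the client's GPU objects may touch hardware. A sketch of the bracket, with the per-client teardown elided:

static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = file->driver_priv;

        xe_pm_runtime_get(xe);          /* keep the device awake across teardown */

        /* ... kill the client's exec queues and close its VMs here ... */

        xe_file_put(xef);               /* drop the open-time reference */
        xe_pm_runtime_put(xe);
}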
203 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_ioctl() local
206 if (xe_device_wedged(xe)) in xe_drm_ioctl()
209 ret = xe_pm_runtime_get_ioctl(xe); in xe_drm_ioctl()
210 if (ret >= 0) in xe_drm_ioctl()
212 xe_pm_runtime_put(xe); in xe_drm_ioctl()
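
These fragments are the classic wedged-check plus runtime-PM bracket around drm_ioctl(). Note that xe_pm_runtime_put() runs even when the get failed, which is why success is tested with `ret >= 0` instead of an early return; the get presumably leaves a usage count behind in either case. A likely reconstruction:

static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct drm_file *file_priv = file->private_data;
        struct xe_device *xe = to_xe_device(file_priv->minor->dev);
        long ret;

        if (xe_device_wedged(xe))
                return -ECANCELED;      /* no ioctls on a wedged device */

        ret = xe_pm_runtime_get_ioctl(xe);
        if (ret >= 0)
                ret = drm_ioctl(file, cmd, arg);
        xe_pm_runtime_put(xe);          /* balances the get even on failure */

        return ret;
}

The xe_drm_compat_ioctl() fragments that follow have exactly the same shape, delegating to drm_compat_ioctl() instead.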
221 struct xe_device *xe = to_xe_device(file_priv->minor->dev); in xe_drm_compat_ioctl() local
224 if (xe_device_wedged(xe)) in xe_drm_compat_ioctl()
227 ret = xe_pm_runtime_get_ioctl(xe); in xe_drm_compat_ioctl()
228 if (ret >= 0) in xe_drm_compat_ioctl()
230 xe_pm_runtime_put(xe); in xe_drm_compat_ioctl()
269 #define LAST_DB_PAGE_OFFSET 0x7ff001 in barrier_fault()
270 pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) + in barrier_fault()
305 struct xe_device *xe = to_xe_device(dev); in xe_pci_barrier_mmap() local
307 if (!IS_DGFX(xe)) in xe_pci_barrier_mmap()
325 return 0; in xe_pci_barrier_mmap()
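
The barrier-mmap fragments point at a fault handler that inserts the PFN of the last doorbell page of PCI BAR 0 into the faulting VMA, so a userspace write to that page acts as a posted-write barrier; the feature is gated to discrete (IS_DGFX) parts. A heavily hedged sketch of the fault path (the prot handling and the surrounding mmap validation are assumptions):

static vm_fault_t barrier_fault(struct vm_fault *vmf)
{
        struct drm_device *dev = vmf->vma->vm_private_data;
        unsigned long pfn;

#define LAST_DB_PAGE_OFFSET 0x7ff001
        pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
                       LAST_DB_PAGE_OFFSET);

        return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
                                   pgprot_noncached(vm_get_page_prot(vmf->vma->vm_flags)));
}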
390 struct xe_device *xe = to_xe_device(dev); in xe_device_destroy() local
392 xe_bo_dev_fini(&xe->bo_device); in xe_device_destroy()
394 if (xe->preempt_fence_wq) in xe_device_destroy()
395 destroy_workqueue(xe->preempt_fence_wq); in xe_device_destroy()
397 if (xe->ordered_wq) in xe_device_destroy()
398 destroy_workqueue(xe->ordered_wq); in xe_device_destroy()
400 if (xe->unordered_wq) in xe_device_destroy()
401 destroy_workqueue(xe->unordered_wq); in xe_device_destroy()
403 if (!IS_ERR_OR_NULL(xe->mem.shrinker)) in xe_device_destroy()
404 xe_shrinker_destroy(xe->mem.shrinker); in xe_device_destroy()
406 if (xe->destroy_wq) in xe_device_destroy()
407 destroy_workqueue(xe->destroy_wq); in xe_device_destroy()
409 ttm_device_fini(&xe->ttm); in xe_device_destroy()
415 struct xe_device *xe; in xe_device_create() local
424 xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); in xe_device_create()
425 if (IS_ERR(xe)) in xe_device_create()
426 return xe; in xe_device_create()
428 err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, in xe_device_create()
429 xe->drm.anon_inode->i_mapping, in xe_device_create()
430 xe->drm.vma_offset_manager, false, false); in xe_device_create()
434 xe_bo_dev_init(&xe->bo_device); in xe_device_create()
435 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); in xe_device_create()
439 xe->mem.shrinker = xe_shrinker_create(xe); in xe_device_create()
440 if (IS_ERR(xe->mem.shrinker)) in xe_device_create()
441 return ERR_CAST(xe->mem.shrinker); in xe_device_create()
443 xe->info.devid = pdev->device; in xe_device_create()
444 xe->info.revid = pdev->revision; in xe_device_create()
445 xe->info.force_execlist = xe_modparam.force_execlist; in xe_device_create()
447 err = xe_irq_init(xe); in xe_device_create()
451 init_waitqueue_head(&xe->ufence_wq); in xe_device_create()
453 init_rwsem(&xe->usm.lock); in xe_device_create()
455 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); in xe_device_create()
462 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL, in xe_device_create()
464 &xe->usm.next_asid, GFP_KERNEL); in xe_device_create()
465 drm_WARN_ON(&xe->drm, err); in xe_device_create()
466 if (err >= 0) in xe_device_create()
467 xa_erase(&xe->usm.asid_to_vm, asid); in xe_device_create()
470 spin_lock_init(&xe->pinned.lock); in xe_device_create()
471 INIT_LIST_HEAD(&xe->pinned.kernel_bo_present); in xe_device_create()
472 INIT_LIST_HEAD(&xe->pinned.external_vram); in xe_device_create()
473 INIT_LIST_HEAD(&xe->pinned.evicted); in xe_device_create()
475 xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", in xe_device_create()
477 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); in xe_device_create()
478 xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); in xe_device_create()
479 xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); in xe_device_create()
480 if (!xe->ordered_wq || !xe->unordered_wq || in xe_device_create()
481 !xe->preempt_fence_wq || !xe->destroy_wq) { in xe_device_create()
486 drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); in xe_device_create()
491 err = drmm_mutex_init(&xe->drm, &xe->pmt.lock); in xe_device_create()
495 err = xe_display_create(xe); in xe_device_create()
499 return xe; in xe_device_create()
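
The workqueue-allocation failure branch has its tail elided in the listing. Because the drmm action registered earlier (xe_device_destroy) already NULL-checks each workqueue before destroying it, the error path most plausibly just records -ENOMEM and unwinds through one label; a sketch (the `err` label name is an assumption):

        if (!xe->ordered_wq || !xe->unordered_wq ||
            !xe->preempt_fence_wq || !xe->destroy_wq) {
                /*
                 * Whichever queues were created are freed later by
                 * xe_device_destroy() via the drmm action above.
                 */
                drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
                err = -ENOMEM;
                goto err;
        }

        /* ... remaining init ... */

err:
        return ERR_PTR(err);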
506 static bool xe_driver_flr_disabled(struct xe_device *xe) in xe_driver_flr_disabled() argument
508 return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS; in xe_driver_flr_disabled()
524 static void __xe_driver_flr(struct xe_device *xe) in __xe_driver_flr() argument
527 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in __xe_driver_flr()
530 drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); in __xe_driver_flr()
541 ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); in __xe_driver_flr()
543 drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); in __xe_driver_flr()
549 xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR); in __xe_driver_flr()
552 ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); in __xe_driver_flr()
554 drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); in __xe_driver_flr()
562 drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); in __xe_driver_flr()
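
Read together, the __xe_driver_flr() fragments outline a three-phase sequence: wait for any in-flight FLR to clear, trigger one by setting DRIVERFLR with a read-modify-write, then wait once for teardown and once for re-initialization. A sketch (the GU_DEBUG/DRIVERFLR_STATUS handshake in the reinit phase is an assumption inferred from the error strings):

static void __xe_driver_flr(struct xe_device *xe)
{
        const unsigned int flr_timeout = 3 * USEC_PER_SEC;      /* assumed budget */
        struct xe_mmio *mmio = xe_root_tile_mmio(xe);
        int ret;

        drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

        /* Phase 1: make sure no FLR is already pending */
        ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
        if (ret) {
                drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
                return;
        }

        /* Phase 2: trigger the FLR (clear no bits, set DRIVERFLR) */
        xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);

        /* Phase 3a: the trigger bit self-clears once teardown completes */
        ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
        if (ret) {
                drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
                return;
        }

        /* Phase 3b: a status register reports re-init complete */
        ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
                             flr_timeout, NULL, false);
        if (ret)
                drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
}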
570 static void xe_driver_flr(struct xe_device *xe) in xe_driver_flr() argument
572 if (xe_driver_flr_disabled(xe)) { in xe_driver_flr()
573 drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); in xe_driver_flr()
577 __xe_driver_flr(xe); in xe_driver_flr()
582 struct xe_device *xe = arg; in xe_driver_flr_fini() local
584 if (xe->needs_flr_on_fini) in xe_driver_flr_fini()
585 xe_driver_flr(xe); in xe_driver_flr_fini()
590 struct xe_device *xe = arg; in xe_device_sanitize() local
594 for_each_gt(gt, xe, id) in xe_device_sanitize()
598 static int xe_set_dma_info(struct xe_device *xe) in xe_set_dma_info() argument
600 unsigned int mask_size = xe->info.dma_mask_size; in xe_set_dma_info()
603 dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev)); in xe_set_dma_info()
605 err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
609 err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); in xe_set_dma_info()
613 return 0; in xe_set_dma_info()
616 drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err); in xe_set_dma_info()
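
The branches elided between the dma_set_mask() calls and the final drm_err() almost certainly converge on one error label (named mask_err here for illustration), giving the function this shape:

static int xe_set_dma_info(struct xe_device *xe)
{
        unsigned int mask_size = xe->info.dma_mask_size;
        int err;

        dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

        err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
        if (err)
                goto mask_err;

        err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
        if (err)
                goto mask_err;

        return 0;

mask_err:
        drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
        return err;
}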
620 static bool verify_lmem_ready(struct xe_device *xe) in verify_lmem_ready() argument
622 u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT; in verify_lmem_ready()
627 static int wait_for_lmem_ready(struct xe_device *xe) in wait_for_lmem_ready() argument
631 if (!IS_DGFX(xe)) in wait_for_lmem_ready()
632 return 0; in wait_for_lmem_ready()
634 if (IS_SRIOV_VF(xe)) in wait_for_lmem_ready()
635 return 0; in wait_for_lmem_ready()
637 if (verify_lmem_ready(xe)) in wait_for_lmem_ready()
638 return 0; in wait_for_lmem_ready()
640 drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); in wait_for_lmem_ready()
660 drm_dbg(&xe->drm, "lmem not initialized by firmware\n"); in wait_for_lmem_ready()
666 } while (!verify_lmem_ready(xe)); in wait_for_lmem_ready()
668 drm_dbg(&xe->drm, "lmem ready after %ums", in wait_for_lmem_ready()
671 return 0; in wait_for_lmem_ready()
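
Between the early-outs and the success print, the elided body is a jiffies-based poll. The sketch below shows the likely loop; the 60 s budget, -EPROBE_DEFER on timeout, and 20 ms poll interval are assumptions the excerpt does not confirm:

        unsigned long start = jiffies;
        unsigned long timeout = start + msecs_to_jiffies(60 * MSEC_PER_SEC);

        do {
                if (signal_pending(current))
                        return -EINTR;

                /* firmware initializes lmem; give up and retry probe later */
                if (time_after(jiffies, timeout)) {
                        drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
                        return -EPROBE_DEFER;
                }

                msleep(20);
        } while (!verify_lmem_ready(xe));

        drm_dbg(&xe->drm, "lmem ready after %ums",
                jiffies_to_msecs(jiffies - start));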
675 static void sriov_update_device_info(struct xe_device *xe) in sriov_update_device_info() argument
678 if (IS_SRIOV_VF(xe)) { in sriov_update_device_info()
679 xe->info.probe_display = 0; in sriov_update_device_info()
680 xe->info.has_heci_gscfi = 0; in sriov_update_device_info()
681 xe->info.skip_guc_pc = 1; in sriov_update_device_info()
682 xe->info.skip_pcode = 1; in sriov_update_device_info()
688 * @xe: xe device instance
694 * Return: 0 on success, error code on failure
696 int xe_device_probe_early(struct xe_device *xe) in xe_device_probe_early() argument
700 err = xe_mmio_probe_early(xe); in xe_device_probe_early()
704 xe_sriov_probe_early(xe); in xe_device_probe_early()
706 sriov_update_device_info(xe); in xe_device_probe_early()
708 err = xe_pcode_probe_early(xe); in xe_device_probe_early()
717 err = xe_survivability_mode_enable(xe); in xe_device_probe_early()
724 err = wait_for_lmem_ready(xe); in xe_device_probe_early()
728 xe->wedged.mode = xe_modparam.wedged_mode; in xe_device_probe_early()
730 return 0; in xe_device_probe_early()
733 static int probe_has_flat_ccs(struct xe_device *xe) in probe_has_flat_ccs() argument
740 if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe)) in probe_has_flat_ccs()
741 return 0; in probe_has_flat_ccs()
743 gt = xe_root_mmio_gt(xe); in probe_has_flat_ccs()
750 xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); in probe_has_flat_ccs()
752 if (!xe->info.has_flat_ccs) in probe_has_flat_ccs()
753 drm_dbg(&xe->drm, in probe_has_flat_ccs()
758 return 0; in probe_has_flat_ccs()
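
The register read in the middle of probe_has_flat_ccs() needs the GT awake, so the elided lines around it are presumably a forcewake get/put pair; a sketch of that middle section (the register and force-wake domain names are assumptions):

        unsigned int fw_ref;
        u32 reg;

        fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
        if (!fw_ref)
                return -ETIMEDOUT;

        /* sample the flat-CCS enable bit while the GT is held awake */
        reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
        xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

        xe_force_wake_put(gt_to_fw(gt), fw_ref);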
761 int xe_device_probe(struct xe_device *xe) in xe_device_probe() argument
768 xe_pat_init_early(xe); in xe_device_probe()
770 err = xe_sriov_init(xe); in xe_device_probe()
774 xe->info.mem_region_mask = 1; in xe_device_probe()
776 err = xe_set_dma_info(xe); in xe_device_probe()
780 err = xe_mmio_probe_tiles(xe); in xe_device_probe()
784 err = xe_ttm_sys_mgr_init(xe); in xe_device_probe()
788 for_each_gt(gt, xe, id) { in xe_device_probe()
801 for_each_tile(tile, xe, id) { in xe_device_probe()
802 if (IS_SRIOV_VF(xe)) { in xe_device_probe()
819 for_each_gt(gt, xe, id) { in xe_device_probe()
825 err = xe_devcoredump_init(xe); in xe_device_probe()
832 err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe); in xe_device_probe()
836 err = probe_has_flat_ccs(xe); in xe_device_probe()
840 err = xe_vram_probe(xe); in xe_device_probe()
844 for_each_tile(tile, xe, id) { in xe_device_probe()
851 err = xe_ttm_stolen_mgr_init(xe); in xe_device_probe()
861 err = xe_display_init_early(xe); in xe_device_probe()
865 for_each_tile(tile, xe, id) { in xe_device_probe()
871 err = xe_irq_install(xe); in xe_device_probe()
875 for_each_gt(gt, xe, id) { in xe_device_probe()
881 err = xe_heci_gsc_init(xe); in xe_device_probe()
885 err = xe_oa_init(xe); in xe_device_probe()
889 err = xe_display_init(xe); in xe_device_probe()
893 err = xe_pxp_init(xe); in xe_device_probe()
897 err = drm_dev_register(&xe->drm, 0); in xe_device_probe()
901 xe_display_register(xe); in xe_device_probe()
903 err = xe_oa_register(xe); in xe_device_probe()
907 err = xe_pmu_register(&xe->pmu); in xe_device_probe()
911 xe_debugfs_register(xe); in xe_device_probe()
913 err = xe_hwmon_register(xe); in xe_device_probe()
917 for_each_gt(gt, xe, id) in xe_device_probe()
920 xe_vsec_init(xe); in xe_device_probe()
922 return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe); in xe_device_probe()
925 xe_display_unregister(xe); in xe_device_probe()
930 void xe_device_remove(struct xe_device *xe) in xe_device_remove() argument
932 xe_display_unregister(xe); in xe_device_remove()
934 drm_dev_unplug(&xe->drm); in xe_device_remove()
937 void xe_device_shutdown(struct xe_device *xe) in xe_device_shutdown() argument
942 drm_dbg(&xe->drm, "Shutting down device\n"); in xe_device_shutdown()
944 if (xe_driver_flr_disabled(xe)) { in xe_device_shutdown()
945 xe_display_pm_shutdown(xe); in xe_device_shutdown()
947 xe_irq_suspend(xe); in xe_device_shutdown()
949 for_each_gt(gt, xe, id) in xe_device_shutdown()
952 xe_display_pm_shutdown_late(xe); in xe_device_shutdown()
955 __xe_driver_flr(xe); in xe_device_shutdown()
961 * @xe: the &xe_device
968 void xe_device_wmb(struct xe_device *xe) in xe_device_wmb() argument
971 if (IS_DGFX(xe)) in xe_device_wmb()
972 xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0); in xe_device_wmb()
977 * @xe: The device
993 void xe_device_td_flush(struct xe_device *xe) in xe_device_td_flush() argument
999 if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) in xe_device_td_flush()
1002 if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) { in xe_device_td_flush()
1003 xe_device_l2_flush(xe); in xe_device_td_flush()
1007 for_each_gt(gt, xe, id) { in xe_device_td_flush()
1023 if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0, in xe_device_td_flush()
1031 void xe_device_l2_flush(struct xe_device *xe) in xe_device_l2_flush() argument
1036 gt = xe_root_mmio_gt(xe); in xe_device_l2_flush()
1046 xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); in xe_device_l2_flush()
1048 if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) in xe_device_l2_flush()
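
The L2-flush pair is a write-then-poll: write 0x1 to XE2_GLOBAL_INVAL to kick the invalidation, then wait for the bit to read back as 0. The final `true` argument to xe_mmio_wait32() selects atomic waiting, which implies the sequence runs under a spinlock; a hedged sketch of the critical section (the lock name is an assumption):

        spin_lock(&gt->global_invl_lock);

        xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
        if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
                xe_gt_err_once(gt, "Global invalidation timeout\n");

        spin_unlock(&gt->global_invl_lock);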
1055 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) in xe_device_ccs_bytes() argument
1057 return xe_device_has_flat_ccs(xe) ? in xe_device_ccs_bytes()
1058 DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0; in xe_device_ccs_bytes()
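
A quick worked example of the CCS sizing, assuming a 256 bytes-per-CCS-byte ratio for illustration (the real ratio comes from NUM_BYTES_PER_CCS_BYTE(xe)):

/*
 * With NUM_BYTES_PER_CCS_BYTE == 256, a 1 MiB BO needs
 *   DIV_ROUND_UP_ULL(1048576, 256) = 4096 bytes of CCS metadata;
 * a size that is not a multiple of 256 rounds up to the next whole
 * CCS byte. Without flat CCS the function simply returns 0.
 */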
1063 * @xe: xe device instance
1067 * that the device is going to remain awake. Xe PM runtime get and put
1072 void xe_device_assert_mem_access(struct xe_device *xe) in xe_device_assert_mem_access() argument
1074 xe_assert(xe, !xe_pm_runtime_suspended(xe)); in xe_device_assert_mem_access()
1077 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p) in xe_device_snapshot_print() argument
1082 drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid); in xe_device_snapshot_print()
1083 drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid); in xe_device_snapshot_print()
1085 for_each_gt(gt, xe, id) { in xe_device_snapshot_print()
1098 u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address) in xe_device_canonicalize_addr() argument
1100 return sign_extend64(address, xe->info.va_bits - 1); in xe_device_canonicalize_addr()
1103 u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address) in xe_device_uncanonicalize_addr() argument
1105 return address & GENMASK_ULL(xe->info.va_bits - 1, 0); in xe_device_uncanonicalize_addr()
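
These two helpers are exact inverses over the low va_bits of an address. A worked example, assuming xe->info.va_bits == 48:

/*
 * Canonical form replicates bit 47 into bits 63:48:
 *   xe_device_canonicalize_addr(xe, 0x0000800000000000ull)
 *       == 0xffff800000000000ull
 * Uncanonicalizing masks those bits back off with GENMASK_ULL(47, 0):
 *   xe_device_uncanonicalize_addr(xe, 0xffff800000000000ull)
 *       == 0x0000800000000000ull
 */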
1110 struct xe_device *xe = arg; in xe_device_wedged_fini() local
1112 xe_pm_runtime_put(xe); in xe_device_wedged_fini()
1117 * @xe: xe device instance
1125 * If xe.wedged module parameter is set to 2, this function will be called
1130 void xe_device_declare_wedged(struct xe_device *xe) in xe_device_declare_wedged() argument
1135 if (xe->wedged.mode == 0) { in xe_device_declare_wedged()
1136 drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n"); in xe_device_declare_wedged()
1140 xe_pm_runtime_get_noresume(xe); in xe_device_declare_wedged()
1142 if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { in xe_device_declare_wedged()
1143 …drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n… in xe_device_declare_wedged()
1147 if (!atomic_xchg(&xe->wedged.flag, 1)) { in xe_device_declare_wedged()
1148 xe->needs_flr_on_fini = true; in xe_device_declare_wedged()
1149 drm_err(&xe->drm, in xe_device_declare_wedged()
1150 "CRITICAL: Xe has declared device %s as wedged.\n" in xe_device_declare_wedged()
1152 "Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n", in xe_device_declare_wedged()
1153 dev_name(xe->drm.dev)); in xe_device_declare_wedged()
1156 drm_dev_wedged_event(&xe->drm, in xe_device_declare_wedged()
1160 for_each_gt(gt, xe, id) in xe_device_declare_wedged()