Excerpts from the Xe Power Management code (drivers/gpu/drm/xe/xe_pm.c):

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * ...
 * to perform the transition from D3hot to D3cold. Xe may disallow this
 * ...
 * (PC-states), and/or other low level power states. Xe PM component provides
 * ...
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * ...
 */
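/*
 * A minimal usage sketch (not from xe_pm.c itself) of the get/put pattern
 * the comment above describes, using the helpers defined later in this
 * file; xe_hw_touch() is a hypothetical stand-in for real hardware work:
 */
static void example_hw_access(struct xe_device *xe)
{
	xe_pm_runtime_get(xe);		/* wake the device, or just bump the usage count */
	xe_hw_touch(xe);		/* hypothetical hardware access */
	xe_pm_runtime_put(xe);		/* drop the reference, allowing autosuspend */
}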
/**
 * xe_rpm_reclaim_safe() - ...
 * @xe: The xe device.
 * ...
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable;
}
static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}
/**
 * xe_pm_suspend() - ...
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	...
	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	err = xe_pxp_pm_suspend(xe->pxp);
	...
	for_each_gt(gt, xe, id)
		...
	xe_display_pm_suspend(xe);
	...
	err = xe_bo_evict_all(xe);
	...
	for_each_gt(gt, xe, id) {
		...
	}

	xe_irq_suspend(xe);
	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;

	/* error unwind (labels elided in this excerpt): */
	xe_display_pm_resume(xe);
	...
	xe_pxp_pm_resume(xe->pxp);
	...
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	...
}
/**
 * xe_pm_resume() - ...
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	...
	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		...
	err = xe_pcode_ready(xe, true);
	...
	xe_display_pm_resume_early(xe);
	...
	err = xe_bo_restore_kernel(xe);
	...
	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		...
	xe_display_pm_resume(xe);

	err = xe_bo_restore_user(xe);
	...
	xe_pxp_pm_resume(xe->pxp);

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;

	/* error unwind (label elided in this excerpt): */
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	...
}
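/*
 * A hedged sketch (not the actual xe_pci.c wiring) of how system-level
 * helpers like xe_pm_suspend()/xe_pm_resume() above are typically hooked
 * into dev_pm_ops; the example_* names are hypothetical and the drvdata
 * layout is assumed for illustration only:
 */
static int example_pm_suspend(struct device *dev)
{
	struct xe_device *xe = dev_get_drvdata(dev);	/* assumes drvdata holds the xe device */

	return xe_pm_suspend(xe);
}

static int example_pm_resume(struct device *dev)
{
	struct xe_device *xe = dev_get_drvdata(dev);

	return xe_pm_resume(xe);
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_pm_suspend, example_pm_resume)
};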
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	...
	drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
	...
	drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
	...
}
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	...
	if (IS_DGFX(xe))
		...
}
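/*
 * For reference, a generic (hedged, not xe_pm.c's exact sequence) runtime
 * PM bring-up for a PCI device driver; the 1000 ms delay is illustrative:
 */
static void example_runtime_init(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);		/* opt in to autosuspend */
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* stay idle for 1s before suspending */
	pm_runtime_set_active(dev);			/* device starts powered on */
	pm_runtime_allow(dev);				/* permit runtime PM without userspace opt-in */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);		/* drop the probe-time reference */
}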
int xe_pm_init_early(struct xe_device *xe)
{
	...
	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	...
	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	...
	return 0;
}
static u32 vram_threshold_value(struct xe_device *xe)
{
	...
	if (xe->info.platform == XE_BATTLEMAGE)
		return 0;
	...
}
/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 * ...
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	...
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		...
		vram_threshold = vram_threshold_value(xe);
		err = xe_pm_set_vram_threshold(xe, vram_threshold);
		...
	}

	xe_pm_runtime_init(xe);

	return 0;
}
/**
 * xe_pm_runtime_fini() - ...
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	...
}
static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);
	...
}
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	...
	return READ_ONCE(xe->pm_callback_task);
}
/**
 * xe_pm_runtime_suspended() - ...
 * @xe: xe device instance
 * ...
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}
/**
 * xe_pm_runtime_suspend() - ...
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	...
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	xe_pm_write_callback_task(xe, current);
	...
	xe_rpm_lockmap_acquire(xe);

	err = xe_pxp_pm_suspend(xe->pxp);
	...
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		...
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		...
	}

	for_each_gt(gt, xe, id) {
		...
	}

	xe_irq_suspend(xe);
	...
	xe_display_pm_runtime_suspend_late(xe);

	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return 0;

	/* error unwind (label elided in this excerpt): */
	xe_display_pm_runtime_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
	...
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	...
}
/**
 * xe_pm_runtime_resume() - ...
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	...
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		...
		xe_display_pm_resume_early(xe);
		...
		err = xe_bo_restore_kernel(xe);
		...
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		...
	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_user(xe);
		...
	}

	xe_pxp_pm_resume(xe->pxp);
	...
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	...
}
/*
 * ...
 * sensitive to ever see the 0 -> 1 transition with the callers locks
 * ...
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}
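/*
 * Hedged aside: the empty acquire/release pair above plays the same role
 * as the generic might_lock() annotation, teaching lockdep about a
 * dependency even on paths that never end up blocking; for example:
 */
static void example_might_take_d3cold_lock(struct xe_device *xe)
{
	might_lock(&xe->d3cold.lock);	/* annotation only; no lock is actually taken */
}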
/**
 * xe_pm_runtime_get() - ...
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}
/**
 * xe_pm_runtime_put() - ...
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}
/**
 * xe_pm_runtime_get_ioctl() - ...
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}
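/*
 * Hedged usage sketch for the ioctl-path helper above; example_ioctl() is
 * hypothetical. Note that pm_runtime_get_sync() keeps its usage-count
 * reference even on failure, hence the put on the error path:
 */
static int example_ioctl(struct xe_device *xe)
{
	int ret;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret < 0) {
		xe_pm_runtime_put(xe);	/* drop the reference taken despite the failure */
		return ret;
	}

	/* ... actual ioctl work against an awake device ... */

	xe_pm_runtime_put(xe);
	return 0;
}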
/**
 * xe_pm_runtime_get_if_active() - ...
 * @xe: xe device instance
 * ...
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}
/**
 * xe_pm_runtime_get_if_in_use() - ...
 * @xe: xe device instance
 * ...
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* the device is awake; grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}
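/*
 * Hedged sketch of the conditional getters above: touch the hardware only
 * if it is already awake, never forcing a resume; useful in opportunistic
 * paths. example_opportunistic_poke() is a hypothetical name:
 */
static void example_opportunistic_poke(struct xe_device *xe)
{
	if (!xe_pm_runtime_get_if_active(xe))
		return;	/* device suspended (or suspending); skip the work */

	/* ... light hardware access while the reference is held ... */

	xe_pm_runtime_put(xe);
}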
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
	...
	struct device *dev = xe->drm.dev;
	...
}
/**
 * xe_pm_runtime_get_noresume() - ...
 * @xe: xe device instance
 * ...
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 ...);
	}
}
/**
 * xe_pm_runtime_resume_and_get() - ...
 * @xe: xe device instance
 * ...
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* the device is awake; grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
/**
 * xe_pm_assert_unbounded_bridge() - ...
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	...
	drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM support.\n");
	...
}
/**
 * xe_pm_set_vram_threshold() - ...
 * @xe: xe device instance
 * ...
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	...
	u32 vram_total_mb = 0;
	...
	man = ttm_manager_type(&xe->ttm, i);
	...
	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
	...
	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}
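/*
 * Hedged usage sketch: tightening the d3cold VRAM threshold; the 64 MiB
 * value and example_set_threshold() name are illustrative only:
 */
static int example_set_threshold(struct xe_device *xe)
{
	int err = xe_pm_set_vram_threshold(xe, 64);

	if (err)	/* e.g. threshold larger than total VRAM */
		drm_dbg(&xe->drm, "vram threshold rejected: %d\n", err);

	return err;
}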
/**
 * xe_pm_d3cold_allowed_toggle() - ...
 * @xe: xe device instance
 * ...
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	...
	u32 total_vram_used_mb = 0;
	...
	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}
	...
	man = ttm_manager_type(&xe->ttm, i);
	...
	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}
/**
 * ...
 * Return: 0 on success. Currently doesn't fail.
 */
int xe_pm_module_init(void)
{
	...
	return 0;
}