Lines matching "autosuspend-delay"

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");

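/*
 * The matching parameter declaration is outside these results; it
 * presumably follows the usual module-param pattern (a sketch, not
 * verified against the full file):
 *
 *   static unsigned long ivpu_tdr_timeout_ms;
 *   module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
 *
 * ivpu_start_job_timeout_detection() below shows how the value is
 * consumed: a non-zero parameter overrides the per-HW default.
 */
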
/* in ivpu_pm_prepare_cold_boot() */
struct ivpu_fw_info *fw = vdev->fw;
...
fw->entry_point = fw->cold_boot_entry_point;

/* in ivpu_pm_prepare_warm_boot() */
struct ivpu_fw_info *fw = vdev->fw;
struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);

if (!bp->save_restore_ret_address) {
        ...
}

ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address);
fw->entry_point = bp->save_restore_ret_address;

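/*
 * Warm boot resumes the firmware at the save/restore return address it
 * published in the boot parameters. When that address is missing, the
 * elided branch body presumably falls back to a cold boot, since only
 * ivpu_pm_prepare_cold_boot() can set a valid entry point in that case.
 */
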
/* in ivpu_suspend() */
pci_save_state(to_pci_dev(vdev->drm.dev));
...
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

/* in ivpu_resume() */
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
pci_restore_state(to_pci_dev(vdev->drm.dev));

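/*
 * Ordering matters here: PCI config space is saved before dropping to
 * D3hot on suspend, and restored only after the device is back in D0 on
 * resume, once it can accept config accesses again.
 */
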
/* in ivpu_pm_recovery_work() */
struct ivpu_device *vdev = pm->vdev;
...
ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
...
ret = pm_runtime_resume_and_get(vdev->drm.dev);
...
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);
down_write(&vdev->pm->reset_lock);
...
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
...
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);

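/*
 * Recovery sequence visible above: resume the device via runtime PM,
 * mark a reset pending, perform the actual reset under the write side
 * of reset_lock (body elided), then notify user space with a
 * KOBJ_CHANGE uevent and release the runtime-PM reference with
 * autosuspend.
 */
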
/* in ivpu_pm_trigger_recovery() */
if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
        ...
        queue_work(system_long_wq, &vdev->pm->recovery_work);
}

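/*
 * atomic_cmpxchg() flips reset_pending 0 -> 1 exactly once, so at most
 * one recovery work item is queued no matter how many paths detect the
 * hang concurrently. The work goes to system_long_wq, which is meant
 * for items that may run for a long time.
 */
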
/* in ivpu_job_timeout_work() */
struct ivpu_device *vdev = pm->vdev;

/* in ivpu_start_job_timeout_detection() */
unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;

/* No-op if already queued */
queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));

/* in ivpu_stop_job_timeout_detection() */
cancel_delayed_work_sync(&vdev->pm->job_timeout_work);

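/*
 * TDR (timeout detection and recovery): each start arms a delayed work
 * with either the module-parameter override or the per-HW default
 * budget. queue_delayed_work() is a no-op while the work is already
 * pending, so the timer is armed once per busy period, and it is
 * cancelled synchronously when detection is stopped.
 */
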
/* in ivpu_pm_suspend_cb() */
timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
...
        return -EBUSY;

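/*
 * System suspend reuses the TDR budget as its drain deadline: the
 * elided code presumably waits for the device to go idle, and once
 * jiffies passes the computed timeout the callback gives up with
 * -EBUSY, failing the suspend transition rather than freezing with
 * jobs still in flight.
 */
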
/* in ivpu_pm_runtime_suspend_cb() */
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
...
if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
        ... /* opening line of a debug print elided */
                 vdev->pm->suspend_reschedule_counter);
        pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
        vdev->pm->suspend_reschedule_counter--;
        return -EAGAIN;
}
...
if (!vdev->pm->suspend_reschedule_counter)
        ...
vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

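/*
 * Runtime suspend backs off while the HW is still busy: each attempt
 * reschedules itself after vdev->timeout.reschedule_suspend and
 * returns -EAGAIN, telling the PM core the suspend did not happen.
 * suspend_reschedule_counter bounds this to PM_RESCHEDULE_LIMIT
 * retries; once it reaches zero the suspend proceeds anyway, with the
 * "if (!vdev->pm->suspend_reschedule_counter)" branch (body elided)
 * presumably handling that forced case, before the counter is
 * refilled for the next cycle.
 */
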
/* in ivpu_rpm_get() */
ret = pm_runtime_resume_and_get(vdev->drm.dev);
if (!drm_WARN_ON(&vdev->drm, ret < 0))
        vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

/* in ivpu_rpm_get_if_active() */
ret = pm_runtime_get_if_active(vdev->drm.dev, false);
drm_WARN_ON(&vdev->drm, ret < 0);

/* in ivpu_rpm_put() */
pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);

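/*
 * Usage pattern: bracket device work with ivpu_rpm_get()/ivpu_rpm_put().
 * The get side resumes the device and refreshes the reschedule budget;
 * the put side marks the device busy and drops the reference with
 * pm_runtime_put_autosuspend(), so the device idles out only after the
 * configured autosuspend delay. ivpu_rpm_get_if_active() conditionally
 * takes a reference without waking the device.
 */
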
/* in ivpu_pm_reset_prepare_cb() */
ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);

pm_runtime_get_sync(vdev->drm.dev);
down_write(&vdev->pm->reset_lock);
...
ivpu_dbg(vdev, PM, "Pre-reset done.\n");

/* in ivpu_pm_reset_done_cb() */
ivpu_dbg(vdev, PM, "Post-reset..\n");
...
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
ivpu_dbg(vdev, PM, "Post-reset done.\n");

pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);

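/*
 * These two callbacks are the PCI reset hooks (invoked around a
 * function-level reset, for example): reset_prepare pins the device
 * with pm_runtime_get_sync() and takes the write side of reset_lock so
 * nothing races the reset; reset_done releases the lock, clears the
 * pending flag, and drops the reference with autosuspend, mirroring
 * the recovery-work sequence above.
 */
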
/* in ivpu_pm_init() */
struct device *dev = vdev->drm.dev;
struct ivpu_pm_info *pm = vdev->pm;
int delay;

pm->vdev = vdev;
pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

init_rwsem(&pm->reset_lock);
atomic_set(&pm->reset_pending, 0);
atomic_set(&pm->reset_counter, 0);

INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);

delay = -1;
... /* condition selecting the HW default is elided */
        delay = vdev->timeout.autosuspend;
...
pm_runtime_set_autosuspend_delay(dev, delay);

ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);

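/*
 * Runtime-PM semantics: a negative autosuspend delay prevents runtime
 * suspends altogether, so the default of -1 keeps the device active
 * unless the elided condition selects the per-HW
 * vdev->timeout.autosuspend value instead. Drivers using this pattern
 * normally also call pm_runtime_use_autosuspend(dev); that call is not
 * among the matched lines here.
 */
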
/* in ivpu_pm_cancel_recovery() */
drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
cancel_work_sync(&vdev->pm->recovery_work);

/* in ivpu_pm_enable() */
struct device *dev = vdev->drm.dev;

/* in ivpu_pm_disable() */
pm_runtime_get_noresume(vdev->drm.dev);
pm_runtime_forbid(vdev->drm.dev);
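
/*
 * Teardown counterpart to ivpu_pm_enable(): pm_runtime_get_noresume()
 * takes a usage-count reference without touching the hardware, and
 * pm_runtime_forbid() disallows runtime power management, so the
 * device cannot runtime-suspend while the driver shuts down.
 */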