Lines matching "power" in drivers/base/power/runtime.c

3  * drivers/base/power/runtime.c - Helper functions for device runtime PM
18 #include "power.h"
56 * update_pm_runtime_accounting - Update the time accounting of power states
59 * In order to be able to have time accounting of the various power states
70 if (dev->power.disable_depth > 0) in update_pm_runtime_accounting()
73 last = dev->power.accounting_timestamp; in update_pm_runtime_accounting()
76 dev->power.accounting_timestamp = now; in update_pm_runtime_accounting()
88 if (dev->power.runtime_status == RPM_SUSPENDED) in update_pm_runtime_accounting()
89 dev->power.suspended_time += delta; in update_pm_runtime_accounting()
91 dev->power.active_time += delta; in update_pm_runtime_accounting()
97 dev->power.runtime_status = status; in __update_runtime_status()
105 spin_lock_irqsave(&dev->power.lock, flags); in rpm_get_accounted_time()
108 time = suspended ? dev->power.suspended_time : dev->power.active_time; in rpm_get_accounted_time()
110 spin_unlock_irqrestore(&dev->power.lock, flags); in rpm_get_accounted_time()
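
The accounting fields updated above are exposed to drivers through pm_runtime_suspended_time(), an exported wrapper around rpm_get_accounted_time(). A minimal sketch of reading it (foo_report_suspended_time() is a hypothetical caller):

#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/pm_runtime.h>

/* Sketch: report how long @dev has been accounted as runtime-suspended.
 * pm_runtime_suspended_time() returns the nanoseconds accumulated in
 * power.suspended_time by update_pm_runtime_accounting(). */
static void foo_report_suspended_time(struct device *dev)
{
	u64 ns = pm_runtime_suspended_time(dev);

	dev_info(dev, "runtime-suspended for %llu us\n",
		 (unsigned long long)(ns / NSEC_PER_USEC));
}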
132 if (dev->power.timer_expires > 0) { in pm_runtime_deactivate_timer()
133 hrtimer_try_to_cancel(&dev->power.suspend_timer); in pm_runtime_deactivate_timer()
134 dev->power.timer_expires = 0; in pm_runtime_deactivate_timer()
149 dev->power.request = RPM_REQ_NONE; in pm_runtime_cancel_pending()
157 * power.last_busy time. If the delay has already expired or is disabled
158 * (negative) or the power.use_autosuspend flag isn't set, return 0.
161 * This function may be called either with or without dev->power.lock held.
162 * Either way it can be racy, since power.last_busy may be updated at any time.
169 if (!dev->power.use_autosuspend) in pm_runtime_autosuspend_expiration()
172 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); in pm_runtime_autosuspend_expiration()
176 expires = READ_ONCE(dev->power.last_busy); in pm_runtime_autosuspend_expiration()
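
The expiration computed here is driven entirely by power.last_busy, which drivers refresh from their I/O paths. A minimal sketch of the usual pattern, with foo_io_done() as a hypothetical completion handler:

#include <linux/pm_runtime.h>

/* Sketch: refresh power.last_busy and drop the usage count through the
 * autosuspend variant, so the core re-arms the suspend timer for
 * pm_runtime_autosuspend_expiration() instead of suspending at once. */
static void foo_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}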
187 return dev->power.memalloc_noio; in dev_memalloc_noio()
226 /* hold power lock since bitfield is not SMP-safe. */ in pm_runtime_set_memalloc_noio()
227 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
228 enabled = dev->power.memalloc_noio; in pm_runtime_set_memalloc_noio()
229 dev->power.memalloc_noio = enable; in pm_runtime_set_memalloc_noio()
230 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
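
pm_runtime_set_memalloc_noio() is meant for drivers sitting on the memory-reclaim path (block devices, network devices used for swap), so that runtime PM transitions allocate memory with GFP_NOIO semantics. A minimal sketch, with foo_blk_probe() hypothetical:

#include <linux/pm_runtime.h>

/* Sketch: mark the device so memory allocations inside its runtime PM
 * callbacks avoid I/O, preventing reclaim deadlocks. */
static int foo_blk_probe(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, true);
	return 0;
}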
262 if (dev->power.runtime_error) in rpm_check_suspend_allowed()
264 else if (dev->power.disable_depth > 0) in rpm_check_suspend_allowed()
266 else if (atomic_read(&dev->power.usage_count)) in rpm_check_suspend_allowed()
268 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count)) in rpm_check_suspend_allowed()
272 else if ((dev->power.deferred_resume && in rpm_check_suspend_allowed()
273 dev->power.runtime_status == RPM_SUSPENDING) || in rpm_check_suspend_allowed()
274 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) in rpm_check_suspend_allowed()
278 else if (dev->power.runtime_status == RPM_SUSPENDED) in rpm_check_suspend_allowed()
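
Because any nonzero power.usage_count makes the check above fail with -EAGAIN, a driver can hold off runtime suspend across a critical section without forcing a resume. A minimal sketch (foo_critical_section() is hypothetical):

#include <linux/pm_runtime.h>

/* Sketch: pin the usage count so rpm_check_suspend_allowed() refuses
 * new suspends for the duration; no resume is triggered either way. */
static void foo_critical_section(struct device *dev)
{
	pm_runtime_get_noresume(dev);	/* power.usage_count++ */

	/* ... no new runtime suspend can start here ... */

	pm_runtime_put_noidle(dev);	/* power.usage_count-- */
}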
317 * The additional power.usage_count check is a safety net in case in pm_runtime_release_supplier()
323 atomic_read(&supplier->power.usage_count) > 0) in pm_runtime_release_supplier()
362 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
365 bool use_links = dev->power.links_count > 0; in __rpm_callback()
367 if (dev->power.irq_safe) { in __rpm_callback()
368 spin_unlock(&dev->power.lock); in __rpm_callback()
370 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
379 if (use_links && dev->power.runtime_status == RPM_RESUMING) { in __rpm_callback()
395 if (dev->power.irq_safe) { in __rpm_callback()
396 spin_lock(&dev->power.lock); in __rpm_callback()
406 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) || in __rpm_callback()
407 (dev->power.runtime_status == RPM_RESUMING && retval))) { in __rpm_callback()
416 spin_lock_irq(&dev->power.lock); in __rpm_callback()
431 if (dev->power.memalloc_noio) { in rpm_callback()
450 dev->power.runtime_error = retval; in rpm_callback()
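
rpm_callback() ultimately invokes the driver's runtime PM callbacks and latches any nonzero return value in power.runtime_error, blocking further transitions until the status is explicitly reset. A minimal sketch of such callbacks (the foo_ names are hypothetical):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Sketch: callbacks reached via rpm_callback(); a nonzero return from
 * either one is recorded in power.runtime_error. */
static int foo_runtime_suspend(struct device *dev)
{
	/* quiesce the hardware, gate clocks, etc. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore context, etc. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};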
465 * This function must be called under dev->power.lock with interrupts disabled.
478 else if (dev->power.runtime_status != RPM_ACTIVE) in rpm_idle()
485 else if (dev->power.request_pending && in rpm_idle()
486 dev->power.request > RPM_REQ_IDLE) in rpm_idle()
490 else if (dev->power.idle_notification) in rpm_idle()
497 dev->power.request = RPM_REQ_NONE; in rpm_idle()
502 if (!callback || dev->power.no_callbacks) in rpm_idle()
507 dev->power.request = RPM_REQ_IDLE; in rpm_idle()
508 if (!dev->power.request_pending) { in rpm_idle()
509 dev->power.request_pending = true; in rpm_idle()
510 queue_work(pm_wq, &dev->power.work); in rpm_idle()
516 dev->power.idle_notification = true; in rpm_idle()
518 if (dev->power.irq_safe) in rpm_idle()
519 spin_unlock(&dev->power.lock); in rpm_idle()
521 spin_unlock_irq(&dev->power.lock); in rpm_idle()
525 if (dev->power.irq_safe) in rpm_idle()
526 spin_lock(&dev->power.lock); in rpm_idle()
528 spin_lock_irq(&dev->power.lock); in rpm_idle()
530 dev->power.idle_notification = false; in rpm_idle()
531 wake_up_all(&dev->power.wait_queue); in rpm_idle()
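
When the idle notification above completes with a zero return value, rpm_idle() goes on to attempt a suspend. A minimal sketch of a ->runtime_idle() callback relying on that (names hypothetical):

#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical busyness check standing in for real hardware state. */
static bool foo_hw_busy(struct device *dev)
{
	return false;
}

/* Sketch: return 0 to let rpm_idle() proceed to rpm_suspend(), or a
 * negative value to veto the suspend attempt. */
static int foo_runtime_idle(struct device *dev)
{
	return foo_hw_busy(dev) ? -EBUSY : 0;
}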
552 * ignore_children of parent->power and irq_safe of dev->power are not set).
557 * This function must be called under dev->power.lock with interrupts disabled.
560 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
574 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) in rpm_suspend()
581 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) { in rpm_suspend()
586 dev->power.request = RPM_REQ_NONE; in rpm_suspend()
595 if (!(dev->power.timer_expires && in rpm_suspend()
596 dev->power.timer_expires <= expires)) { in rpm_suspend()
601 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * in rpm_suspend()
604 dev->power.timer_expires = expires; in rpm_suspend()
605 hrtimer_start_range_ns(&dev->power.suspend_timer, in rpm_suspend()
610 dev->power.timer_autosuspends = 1; in rpm_suspend()
618 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_suspend()
626 if (dev->power.irq_safe) { in rpm_suspend()
627 spin_unlock(&dev->power.lock); in rpm_suspend()
631 spin_lock(&dev->power.lock); in rpm_suspend()
637 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_suspend()
639 if (dev->power.runtime_status != RPM_SUSPENDING) in rpm_suspend()
642 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
646 spin_lock_irq(&dev->power.lock); in rpm_suspend()
648 finish_wait(&dev->power.wait_queue, &wait); in rpm_suspend()
652 if (dev->power.no_callbacks) in rpm_suspend()
657 dev->power.request = (rpmflags & RPM_AUTO) ? in rpm_suspend()
659 if (!dev->power.request_pending) { in rpm_suspend()
660 dev->power.request_pending = true; in rpm_suspend()
661 queue_work(pm_wq, &dev->power.work); in rpm_suspend()
683 atomic_add_unless(&parent->power.child_count, -1, 0); in rpm_suspend()
685 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
687 if (dev->power.deferred_resume) { in rpm_suspend()
688 dev->power.deferred_resume = false; in rpm_suspend()
694 if (dev->power.irq_safe) in rpm_suspend()
698 if (parent && !parent->power.ignore_children) { in rpm_suspend()
699 spin_unlock(&dev->power.lock); in rpm_suspend()
701 spin_lock(&parent->power.lock); in rpm_suspend()
703 spin_unlock(&parent->power.lock); in rpm_suspend()
705 spin_lock(&dev->power.lock); in rpm_suspend()
708 if (dev->power.links_count > 0) { in rpm_suspend()
709 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
713 spin_lock_irq(&dev->power.lock); in rpm_suspend()
724 dev->power.deferred_resume = false; in rpm_suspend()
725 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
728 dev->power.runtime_error = 0; in rpm_suspend()
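
All public suspend entry points funnel into rpm_suspend() with different rpmflags. A minimal sketch of the two common ones (foo_try_suspend() is hypothetical):

#include <linux/pm_runtime.h>

/* Sketch: the synchronous call runs rpm_suspend() directly; the async
 * autosuspend variant returns immediately and lets the core arm the
 * suspend timer or queue work on pm_wq. */
static void foo_try_suspend(struct device *dev, bool async)
{
	if (async)
		pm_request_autosuspend(dev);	/* RPM_ASYNC | RPM_AUTO */
	else
		pm_runtime_suspend(dev);	/* rpmflags == 0 */
}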
760 * This function must be called under dev->power.lock with interrupts disabled.
763 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_resume()
772 if (dev->power.runtime_error) { in rpm_resume()
774 } else if (dev->power.disable_depth > 0) { in rpm_resume()
775 if (dev->power.runtime_status == RPM_ACTIVE && in rpm_resume()
776 dev->power.last_status == RPM_ACTIVE) in rpm_resume()
790 dev->power.request = RPM_REQ_NONE; in rpm_resume()
791 if (!dev->power.timer_autosuspends) in rpm_resume()
794 if (dev->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
799 if (dev->power.runtime_status == RPM_RESUMING || in rpm_resume()
800 dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
804 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
805 dev->power.deferred_resume = true; in rpm_resume()
814 if (dev->power.irq_safe) { in rpm_resume()
815 spin_unlock(&dev->power.lock); in rpm_resume()
819 spin_lock(&dev->power.lock); in rpm_resume()
825 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_resume()
827 if (dev->power.runtime_status != RPM_RESUMING && in rpm_resume()
828 dev->power.runtime_status != RPM_SUSPENDING) in rpm_resume()
831 spin_unlock_irq(&dev->power.lock); in rpm_resume()
835 spin_lock_irq(&dev->power.lock); in rpm_resume()
837 finish_wait(&dev->power.wait_queue, &wait); in rpm_resume()
843 * power.no_callbacks is set, because otherwise we don't know whether in rpm_resume()
846 if (dev->power.no_callbacks && !parent && dev->parent) { in rpm_resume()
847 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); in rpm_resume()
848 if (dev->parent->power.disable_depth > 0 || in rpm_resume()
849 dev->parent->power.ignore_children || in rpm_resume()
850 dev->parent->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
851 atomic_inc(&dev->parent->power.child_count); in rpm_resume()
852 spin_unlock(&dev->parent->power.lock); in rpm_resume()
856 spin_unlock(&dev->parent->power.lock); in rpm_resume()
861 dev->power.request = RPM_REQ_RESUME; in rpm_resume()
862 if (!dev->power.request_pending) { in rpm_resume()
863 dev->power.request_pending = true; in rpm_resume()
864 queue_work(pm_wq, &dev->power.work); in rpm_resume()
877 if (dev->power.irq_safe) in rpm_resume()
880 spin_unlock(&dev->power.lock); in rpm_resume()
884 spin_lock(&parent->power.lock); in rpm_resume()
889 if (!parent->power.disable_depth && in rpm_resume()
890 !parent->power.ignore_children) { in rpm_resume()
892 if (parent->power.runtime_status != RPM_ACTIVE) in rpm_resume()
895 spin_unlock(&parent->power.lock); in rpm_resume()
897 spin_lock(&dev->power.lock); in rpm_resume()
905 if (dev->power.no_callbacks) in rpm_resume()
923 atomic_inc(&parent->power.child_count); in rpm_resume()
925 wake_up_all(&dev->power.wait_queue); in rpm_resume()
931 if (parent && !dev->power.irq_safe) { in rpm_resume()
932 spin_unlock_irq(&dev->power.lock); in rpm_resume()
936 spin_lock_irq(&dev->power.lock); in rpm_resume()
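
Drivers usually reach rpm_resume() through the get-style helpers before touching hardware. A minimal sketch (foo_start_io() hypothetical); note that pm_runtime_resume_and_get() drops the usage count by itself when the resume fails:

#include <linux/pm_runtime.h>

/* Sketch: resume (or just reference an already-active device) before
 * I/O, then drop the reference through the autosuspend path. */
static int foo_start_io(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0)
		return ret;

	/* ... program the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}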
953 struct device *dev = container_of(work, struct device, power.work); in pm_runtime_work()
956 spin_lock_irq(&dev->power.lock); in pm_runtime_work()
958 if (!dev->power.request_pending) in pm_runtime_work()
961 req = dev->power.request; in pm_runtime_work()
962 dev->power.request = RPM_REQ_NONE; in pm_runtime_work()
963 dev->power.request_pending = false; in pm_runtime_work()
983 spin_unlock_irq(&dev->power.lock); in pm_runtime_work()
994 struct device *dev = container_of(timer, struct device, power.suspend_timer); in pm_suspend_timer_fn()
998 spin_lock_irqsave(&dev->power.lock, flags); in pm_suspend_timer_fn()
1000 expires = dev->power.timer_expires; in pm_suspend_timer_fn()
1006 dev->power.timer_expires = 0; in pm_suspend_timer_fn()
1007 rpm_suspend(dev, dev->power.timer_autosuspends ? in pm_suspend_timer_fn()
1011 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_suspend_timer_fn()
1027 spin_lock_irqsave(&dev->power.lock, flags); in pm_schedule_suspend()
1042 dev->power.timer_expires = expires; in pm_schedule_suspend()
1043 dev->power.timer_autosuspends = 0; in pm_schedule_suspend()
1044 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); in pm_schedule_suspend()
1047 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_schedule_suspend()
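
A minimal sketch of the caller's side of pm_schedule_suspend(); the delay is in milliseconds and becomes the absolute hrtimer expiry stored in power.timer_expires above (foo_defer_suspend() is hypothetical):

#include <linux/pm_runtime.h>

/* Sketch: ask for a suspend attempt roughly half a second from now. */
static int foo_defer_suspend(struct device *dev)
{
	return pm_schedule_suspend(dev, 500);	/* delay in ms */
}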
1057 ret = atomic_sub_return(1, &dev->power.usage_count); in rpm_drop_usage_count()
1067 atomic_inc(&dev->power.usage_count); in rpm_drop_usage_count()
1100 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_idle()
1102 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_idle()
1104 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_idle()
1138 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_suspend()
1140 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_suspend()
1142 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_suspend()
1164 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && in __pm_runtime_resume()
1165 dev->power.runtime_status != RPM_ACTIVE); in __pm_runtime_resume()
1168 atomic_inc(&dev->power.usage_count); in __pm_runtime_resume()
1170 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_resume()
1172 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_resume()
1205 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_get_if_active()
1206 if (dev->power.disable_depth > 0) { in pm_runtime_get_if_active()
1208 } else if (dev->power.runtime_status != RPM_ACTIVE) { in pm_runtime_get_if_active()
1212 atomic_inc(&dev->power.usage_count); in pm_runtime_get_if_active()
1214 retval = atomic_inc_not_zero(&dev->power.usage_count); in pm_runtime_get_if_active()
1217 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_get_if_active()
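
pm_runtime_get_if_active() and its pm_runtime_get_if_in_use() wrapper let fast paths take a reference only when no resume would be needed. A minimal sketch (foo_try_fast_path() hypothetical; helper naming has shifted between kernel versions):

#include <linux/pm_runtime.h>

/* Sketch: returns true only if a reference was taken while the device
 * was RPM_ACTIVE with a nonzero usage count; never waits for a resume. */
static bool foo_try_fast_path(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return false;	/* disabled, suspended, or unused */

	/* ... fast-path access, device known to be active ... */

	pm_runtime_put(dev);
	return true;
}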
1228 * If runtime PM of the device is disabled or its power.runtime_error field is
1232 * parent's power.ignore_children flag is unset, the device's status cannot be
1235 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1257 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1263 if (dev->power.runtime_error || dev->power.disable_depth) in __pm_runtime_set_status()
1264 dev->power.disable_depth++; in __pm_runtime_set_status()
1268 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
1289 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1291 if (dev->power.runtime_status == status || !parent) in __pm_runtime_set_status()
1295 atomic_add_unless(&parent->power.child_count, -1, 0); in __pm_runtime_set_status()
1296 notify_parent = !parent->power.ignore_children; in __pm_runtime_set_status()
1298 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); in __pm_runtime_set_status()
1303 * 'power.ignore_children' flag unset. in __pm_runtime_set_status()
1305 if (!parent->power.disable_depth && in __pm_runtime_set_status()
1306 !parent->power.ignore_children && in __pm_runtime_set_status()
1307 parent->power.runtime_status != RPM_ACTIVE) { in __pm_runtime_set_status()
1312 } else if (dev->power.runtime_status == RPM_SUSPENDED) { in __pm_runtime_set_status()
1313 atomic_inc(&parent->power.child_count); in __pm_runtime_set_status()
1316 spin_unlock(&parent->power.lock); in __pm_runtime_set_status()
1327 dev->power.runtime_error = 0; in __pm_runtime_set_status()
1330 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
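
__pm_runtime_set_status() backs pm_runtime_set_active() and pm_runtime_set_suspended(), which drivers use to seed the status before enabling runtime PM. A minimal probe-time sketch (foo_probe() hypothetical):

#include <linux/pm_runtime.h>

/* Sketch: the hardware comes up powered, so record RPM_ACTIVE before
 * enabling runtime PM; the default after pm_runtime_init() is
 * RPM_SUSPENDED. */
static int foo_probe(struct device *dev)
{
	int ret = pm_runtime_set_active(dev);

	if (ret)
		return ret;

	pm_runtime_enable(dev);
	return 0;
}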
1356 * Should be called under dev->power.lock with interrupts disabled.
1362 if (dev->power.request_pending) { in __pm_runtime_barrier()
1363 dev->power.request = RPM_REQ_NONE; in __pm_runtime_barrier()
1364 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1366 cancel_work_sync(&dev->power.work); in __pm_runtime_barrier()
1368 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1369 dev->power.request_pending = false; in __pm_runtime_barrier()
1372 if (dev->power.runtime_status == RPM_SUSPENDING || in __pm_runtime_barrier()
1373 dev->power.runtime_status == RPM_RESUMING || in __pm_runtime_barrier()
1374 dev->power.idle_notification) { in __pm_runtime_barrier()
1379 prepare_to_wait(&dev->power.wait_queue, &wait, in __pm_runtime_barrier()
1381 if (dev->power.runtime_status != RPM_SUSPENDING in __pm_runtime_barrier()
1382 && dev->power.runtime_status != RPM_RESUMING in __pm_runtime_barrier()
1383 && !dev->power.idle_notification) in __pm_runtime_barrier()
1385 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1389 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1391 finish_wait(&dev->power.wait_queue, &wait); in __pm_runtime_barrier()
1414 spin_lock_irq(&dev->power.lock); in pm_runtime_barrier()
1416 if (dev->power.request_pending in pm_runtime_barrier()
1417 && dev->power.request == RPM_REQ_RESUME) { in pm_runtime_barrier()
1424 spin_unlock_irq(&dev->power.lock); in pm_runtime_barrier()
1436 * Increment power.disable_depth for the device and if it was zero previously,
1442 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1447 spin_lock_irq(&dev->power.lock); in __pm_runtime_disable()
1449 if (dev->power.disable_depth > 0) { in __pm_runtime_disable()
1450 dev->power.disable_depth++; in __pm_runtime_disable()
1459 if (check_resume && dev->power.request_pending && in __pm_runtime_disable()
1460 dev->power.request == RPM_REQ_RESUME) { in __pm_runtime_disable()
1475 if (!dev->power.disable_depth++) { in __pm_runtime_disable()
1477 dev->power.last_status = dev->power.runtime_status; in __pm_runtime_disable()
1481 spin_unlock_irq(&dev->power.lock); in __pm_runtime_disable()
1493 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_enable()
1495 if (!dev->power.disable_depth) { in pm_runtime_enable()
1500 if (--dev->power.disable_depth > 0) in pm_runtime_enable()
1503 dev->power.last_status = RPM_INVALID; in pm_runtime_enable()
1504 dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); in pm_runtime_enable()
1506 if (dev->power.runtime_status == RPM_SUSPENDED && in pm_runtime_enable()
1507 !dev->power.ignore_children && in pm_runtime_enable()
1508 atomic_read(&dev->power.child_count) > 0) in pm_runtime_enable()
1512 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_enable()
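
Since power.disable_depth is a counter, every pm_runtime_enable() needs a matching disable. A minimal remove-path sketch balancing the probe pattern above (foo_remove() hypothetical):

#include <linux/pm_runtime.h>

/* Sketch: undo the probe-time enable and put the recorded status back
 * to the pm_runtime_init() default. */
static void foo_remove(struct device *dev)
{
	pm_runtime_disable(dev);	/* power.disable_depth++ */
	pm_runtime_set_suspended(dev);
}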
1542 * Increase the device's usage count and clear its power.runtime_auto flag,
1548 spin_lock_irq(&dev->power.lock); in pm_runtime_forbid()
1549 if (!dev->power.runtime_auto) in pm_runtime_forbid()
1552 dev->power.runtime_auto = false; in pm_runtime_forbid()
1553 atomic_inc(&dev->power.usage_count); in pm_runtime_forbid()
1557 spin_unlock_irq(&dev->power.lock); in pm_runtime_forbid()
1565 * Decrease the device's usage count and set its power.runtime_auto flag.
1571 spin_lock_irq(&dev->power.lock); in pm_runtime_allow()
1572 if (dev->power.runtime_auto) in pm_runtime_allow()
1575 dev->power.runtime_auto = true; in pm_runtime_allow()
1583 spin_unlock_irq(&dev->power.lock); in pm_runtime_allow()
1591 * Set the power.no_callbacks flag, which tells the PM core that this
1592 * device is power-managed through its parent and has no runtime PM
1597 spin_lock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1598 dev->power.no_callbacks = 1; in pm_runtime_no_callbacks()
1599 spin_unlock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1609 * Set the power.irq_safe flag, which tells the PM core that the
1621 spin_lock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1622 dev->power.irq_safe = 1; in pm_runtime_irq_safe()
1623 spin_unlock_irq(&dev->power.lock); in pm_runtime_irq_safe()
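
Once power.irq_safe is set, the synchronous helpers take power.lock without re-enabling interrupts and may be called from hardirq context, at the cost of keeping the parent permanently resumed. A minimal sketch (foo_irq_handler() hypothetical):

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

/* Sketch: legal from hardirq context only because the driver called
 * pm_runtime_irq_safe(dev) beforehand; the callbacks must not sleep. */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);
	/* ... acknowledge the interrupt ... */
	pm_runtime_put(dev);

	return IRQ_HANDLED;
}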
1636 * This function must be called under dev->power.lock with interrupts disabled.
1640 int delay = dev->power.autosuspend_delay; in update_autosuspend()
1643 if (dev->power.use_autosuspend && delay < 0) { in update_autosuspend()
1647 atomic_inc(&dev->power.usage_count); in update_autosuspend()
1659 atomic_dec(&dev->power.usage_count); in update_autosuspend()
1671 * Set the device's power.autosuspend_delay value. If it changes to negative
1672 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1679 spin_lock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1680 old_delay = dev->power.autosuspend_delay; in pm_runtime_set_autosuspend_delay()
1681 old_use = dev->power.use_autosuspend; in pm_runtime_set_autosuspend_delay()
1682 dev->power.autosuspend_delay = delay; in pm_runtime_set_autosuspend_delay()
1684 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1693 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1700 spin_lock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
1701 old_delay = dev->power.autosuspend_delay; in __pm_runtime_use_autosuspend()
1702 old_use = dev->power.use_autosuspend; in __pm_runtime_use_autosuspend()
1703 dev->power.use_autosuspend = use; in __pm_runtime_use_autosuspend()
1705 spin_unlock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
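
The two setters above are normally called together during probe, before runtime PM is enabled. A minimal sketch of the conventional ordering (foo_setup_autosuspend() hypothetical):

#include <linux/pm_runtime.h>

/* Sketch: set the delay, turn on the use_autosuspend flag, then enable
 * runtime PM; both setters funnel into update_autosuspend(). */
static void foo_setup_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}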
1715 dev->power.runtime_status = RPM_SUSPENDED; in pm_runtime_init()
1716 dev->power.last_status = RPM_INVALID; in pm_runtime_init()
1717 dev->power.idle_notification = false; in pm_runtime_init()
1719 dev->power.disable_depth = 1; in pm_runtime_init()
1720 atomic_set(&dev->power.usage_count, 0); in pm_runtime_init()
1722 dev->power.runtime_error = 0; in pm_runtime_init()
1724 atomic_set(&dev->power.child_count, 0); in pm_runtime_init()
1726 dev->power.runtime_auto = true; in pm_runtime_init()
1728 dev->power.request_pending = false; in pm_runtime_init()
1729 dev->power.request = RPM_REQ_NONE; in pm_runtime_init()
1730 dev->power.deferred_resume = false; in pm_runtime_init()
1731 dev->power.needs_force_resume = 0; in pm_runtime_init()
1732 INIT_WORK(&dev->power.work, pm_runtime_work); in pm_runtime_init()
1734 dev->power.timer_expires = 0; in pm_runtime_init()
1735 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in pm_runtime_init()
1736 dev->power.suspend_timer.function = pm_suspend_timer_fn; in pm_runtime_init()
1738 init_waitqueue_head(&dev->power.wait_queue); in pm_runtime_init()
1748 if (dev->power.runtime_status == RPM_ACTIVE) in pm_runtime_reinit()
1750 if (dev->power.irq_safe) { in pm_runtime_reinit()
1751 spin_lock_irq(&dev->power.lock); in pm_runtime_reinit()
1752 dev->power.irq_safe = 0; in pm_runtime_reinit()
1753 spin_unlock_irq(&dev->power.lock); in pm_runtime_reinit()
1814 spin_lock_irq(&dev->power.lock); in pm_runtime_new_link()
1815 dev->power.links_count++; in pm_runtime_new_link()
1816 spin_unlock_irq(&dev->power.lock); in pm_runtime_new_link()
1821 spin_lock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1822 WARN_ON(dev->power.links_count == 0); in pm_runtime_drop_link_count()
1823 dev->power.links_count--; in pm_runtime_drop_link_count()
1824 spin_unlock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
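
power.links_count is maintained on behalf of the device-link code; the counted links are created by consumers with the PM-runtime flag. A minimal sketch, where foo_link_devices() and its arguments are hypothetical:

#include <linux/device.h>
#include <linux/errno.h>

/* Sketch: create a managed link so that runtime-resuming the consumer
 * also resumes the supplier; this bumps power.links_count via
 * pm_runtime_new_link(). */
static int foo_link_devices(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);

	return link ? 0 : -ENODEV;
}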
1847 return atomic_read(&dev->power.usage_count) <= 1 && in pm_runtime_need_not_resume()
1848 (atomic_read(&dev->power.child_count) == 0 || in pm_runtime_need_not_resume()
1849 dev->power.ignore_children); in pm_runtime_need_not_resume()
1865 * sure the device is put into low power state and it should only be used during
1901 dev->power.needs_force_resume = 1; in pm_runtime_force_suspend()
1918 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
1919 * those actions and bring the device into full power, if it is expected to be
1930 if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume) in pm_runtime_force_resume()
1951 dev->power.needs_force_resume = 0; in pm_runtime_force_resume()
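
pm_runtime_force_suspend() and pm_runtime_force_resume() are designed to be plugged in directly as system sleep callbacks by drivers whose runtime PM callbacks already quiesce the device completely; power.needs_force_resume tracks what has to be undone. A minimal sketch (the bar_ names are hypothetical):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int bar_runtime_suspend(struct device *dev)
{
	/* power the device fully down */
	return 0;
}

static int bar_runtime_resume(struct device *dev)
{
	/* power the device fully back up */
	return 0;
}

/* Sketch: reuse the runtime PM path for system-wide suspend/resume. */
static const struct dev_pm_ops bar_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(bar_runtime_suspend, bar_runtime_resume, NULL)
};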