Lines Matching refs:dev (drivers/base/power/runtime.c, Linux kernel runtime PM core)

27 static pm_callback_t __rpm_get_driver_callback(struct device *dev,
30 if (dev->driver && dev->driver->pm)
31 return get_callback_ptr(dev->driver->pm, cb_offset);
36 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
41 if (dev->pm_domain)
42 ops = &dev->pm_domain->ops;
43 else if (dev->type && dev->type->pm)
44 ops = dev->type->pm;
45 else if (dev->class && dev->class->pm)
46 ops = dev->class->pm;
47 else if (dev->bus && dev->bus->pm)
48 ops = dev->bus->pm;
56 cb = __rpm_get_driver_callback(dev, cb_offset);
61 #define RPM_GET_CALLBACK(dev, callback) \
62 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
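The lines above (27-62) show the callback resolution order: a dev_pm_domain overrides the device type, which overrides the class, which overrides the bus, with the driver's own dev_pm_ops as the final fallback, and RPM_GET_CALLBACK picks the member via offsetof(). A minimal sketch of what the offsetof-based lookup amounts to; get_callback_ptr() itself is not among the matched lines, so this body is an assumption:

/* Assumed helper: fetch the function pointer stored cb_offset bytes into *ops. */
static pm_callback_t get_callback_ptr(const struct dev_pm_ops *ops, size_t cb_offset)
{
        return *(const pm_callback_t *)((const char *)ops + cb_offset);
}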
64 static int rpm_resume(struct device *dev, int rpmflags);
65 static int rpm_suspend(struct device *dev, int rpmflags);
69 * @dev: Device to update the accounting for
78 static void update_pm_runtime_accounting(struct device *dev)
82 if (dev->power.disable_depth > 0)
85 last = dev->power.accounting_timestamp;
88 dev->power.accounting_timestamp = now;
100 if (dev->power.runtime_status == RPM_SUSPENDED)
101 dev->power.suspended_time += delta;
103 dev->power.active_time += delta;
106 static void __update_runtime_status(struct device *dev, enum rpm_status status)
108 update_pm_runtime_accounting(dev);
109 trace_rpm_status(dev, status);
110 dev->power.runtime_status = status;
113 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
118 spin_lock_irqsave(&dev->power.lock, flags);
120 update_pm_runtime_accounting(dev);
121 time = suspended ? dev->power.suspended_time : dev->power.active_time;
123 spin_unlock_irqrestore(&dev->power.lock, flags);
128 u64 pm_runtime_active_time(struct device *dev)
130 return rpm_get_accounted_time(dev, false);
133 u64 pm_runtime_suspended_time(struct device *dev)
135 return rpm_get_accounted_time(dev, true);
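pm_runtime_active_time() and pm_runtime_suspended_time() return the accumulated times in nanoseconds under dev->power.lock. A hedged usage sketch, e.g. from a debug or sysfs path inside the PM core (these helpers appear to be internal to the PM core rather than a driver-facing API, and the function name here is hypothetical):

static void report_rpm_times(struct device *dev)
{
        u64 active = pm_runtime_active_time(dev);
        u64 suspended = pm_runtime_suspended_time(dev);

        dev_dbg(dev, "runtime PM accounting: active %llu ns, suspended %llu ns\n",
                (unsigned long long)active, (unsigned long long)suspended);
}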
141 * @dev: Device to handle.
143 static void pm_runtime_deactivate_timer(struct device *dev)
145 if (dev->power.timer_expires > 0) {
146 hrtimer_try_to_cancel(&dev->power.suspend_timer);
147 dev->power.timer_expires = 0;
153 * @dev: Device to handle.
155 static void pm_runtime_cancel_pending(struct device *dev)
157 pm_runtime_deactivate_timer(dev);
162 dev->power.request = RPM_REQ_NONE;
167 * @dev: Device to handle.
174 * This function may be called either with or without dev->power.lock held.
177 u64 pm_runtime_autosuspend_expiration(struct device *dev)
182 if (!dev->power.use_autosuspend)
185 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
189 expires = READ_ONCE(dev->power.last_busy);
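The fragment above (lines 177-189) computes when an autosuspend may fire. A sketch of the arithmetic it implies: no expiration when autosuspend is unused or the delay is negative, otherwise last_busy plus the delay converted from milliseconds to nanoseconds (sketch only, mirroring the logic suggested by the matched lines):

static u64 autosuspend_expiration_sketch(struct device *dev)
{
        int delay = READ_ONCE(dev->power.autosuspend_delay);

        if (!dev->power.use_autosuspend || delay < 0)
                return 0;

        return READ_ONCE(dev->power.last_busy) + (u64)delay * NSEC_PER_MSEC;
}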
198 static int dev_memalloc_noio(struct device *dev, void *data)
200 return dev->power.memalloc_noio;
205 * @dev: Device to handle.
231 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
240 spin_lock_irq(&dev->power.lock);
241 enabled = dev->power.memalloc_noio;
242 dev->power.memalloc_noio = enable;
243 spin_unlock_irq(&dev->power.lock);
252 dev = dev->parent;
259 if (!dev || (!enable &&
260 device_for_each_child(dev, NULL, dev_memalloc_noio)))
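pm_runtime_set_memalloc_noio() propagates the memalloc_noio flag up the ancestor chain so runtime PM callbacks on storage paths avoid allocations that could recurse into I/O. A hedged usage sketch (the call sites are hypothetical):

/* Probe path of a storage-class driver: */
pm_runtime_set_memalloc_noio(dev, true);

/* ... and the matching clear on remove: */
pm_runtime_set_memalloc_noio(dev, false);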
269 * @dev: Device to test.
271 static int rpm_check_suspend_allowed(struct device *dev)
275 if (dev->power.runtime_error)
277 else if (dev->power.disable_depth > 0)
279 else if (atomic_read(&dev->power.usage_count))
281 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
285 else if ((dev->power.deferred_resume &&
286 dev->power.runtime_status == RPM_SUSPENDING) ||
287 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
289 else if (__dev_pm_qos_resume_latency(dev) == 0)
291 else if (dev->power.runtime_status == RPM_SUSPENDED)
297 static int rpm_get_suppliers(struct device *dev)
301 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
340 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
344 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
352 static void rpm_put_suppliers(struct device *dev)
354 __rpm_put_suppliers(dev, true);
357 static void rpm_suspend_suppliers(struct device *dev)
362 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
372 * @dev: Device to run the callback for.
374 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
375 __releases(&dev->power.lock) __acquires(&dev->power.lock)
378 bool use_links = dev->power.links_count > 0;
380 if (dev->power.irq_safe) {
381 spin_unlock(&dev->power.lock);
383 spin_unlock_irq(&dev->power.lock);
392 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
395 retval = rpm_get_suppliers(dev);
397 rpm_put_suppliers(dev);
406 retval = cb(dev);
408 if (dev->power.irq_safe) {
409 spin_lock(&dev->power.lock);
419 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
420 (dev->power.runtime_status == RPM_RESUMING && retval))) {
423 __rpm_put_suppliers(dev, false);
429 spin_lock_irq(&dev->power.lock);
438 * @dev: Device to run the callback for.
440 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
444 if (dev->power.memalloc_noio) {
457 retval = __rpm_callback(cb, dev);
460 retval = __rpm_callback(cb, dev);
473 dev->power.runtime_error = retval;
480 * @dev: Device to notify the bus type about.
489 * This function must be called under dev->power.lock with interrupts disabled.
491 static int rpm_idle(struct device *dev, int rpmflags)
496 trace_rpm_idle(dev, rpmflags);
497 retval = rpm_check_suspend_allowed(dev);
502 else if (dev->power.runtime_status != RPM_ACTIVE)
509 else if (dev->power.request_pending &&
510 dev->power.request > RPM_REQ_IDLE)
514 else if (dev->power.idle_notification)
521 dev->power.request = RPM_REQ_NONE;
523 callback = RPM_GET_CALLBACK(dev, runtime_idle);
526 if (!callback || dev->power.no_callbacks)
531 dev->power.request = RPM_REQ_IDLE;
532 if (!dev->power.request_pending) {
533 dev->power.request_pending = true;
534 queue_work(pm_wq, &dev->power.work);
536 trace_rpm_return_int(dev, _THIS_IP_, 0);
540 dev->power.idle_notification = true;
542 if (dev->power.irq_safe)
543 spin_unlock(&dev->power.lock);
545 spin_unlock_irq(&dev->power.lock);
547 retval = callback(dev);
549 if (dev->power.irq_safe)
550 spin_lock(&dev->power.lock);
552 spin_lock_irq(&dev->power.lock);
554 dev->power.idle_notification = false;
555 wake_up_all(&dev->power.wait_queue);
558 trace_rpm_return_int(dev, _THIS_IP_, retval);
559 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
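On line 559 the idle path turns into an autosuspend attempt when the driver's ->runtime_idle() callback returns 0. A hedged driver-side sketch (foo_runtime_idle is a hypothetical name):

static int foo_runtime_idle(struct device *dev)
{
        /*
         * Return 0 to let the core proceed to rpm_suspend(dev, RPM_AUTO);
         * return -EBUSY (or any error) to veto the suspend for now.
         */
        return 0;
}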
564 * @dev: Device to suspend.
576 * ignore_children of parent->power and irq_safe of dev->power are not set).
581 * This function must be called under dev->power.lock with interrupts disabled.
583 static int rpm_suspend(struct device *dev, int rpmflags)
584 __releases(&dev->power.lock) __acquires(&dev->power.lock)
590 trace_rpm_suspend(dev, rpmflags);
593 retval = rpm_check_suspend_allowed(dev);
598 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
605 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
606 u64 expires = pm_runtime_autosuspend_expiration(dev);
610 dev->power.request = RPM_REQ_NONE;
619 if (!(dev->power.timer_expires &&
620 dev->power.timer_expires <= expires)) {
625 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
628 dev->power.timer_expires = expires;
629 hrtimer_start_range_ns(&dev->power.suspend_timer,
634 dev->power.timer_autosuspends = 1;
640 pm_runtime_cancel_pending(dev);
642 if (dev->power.runtime_status == RPM_SUSPENDING) {
650 if (dev->power.irq_safe) {
651 spin_unlock(&dev->power.lock);
655 spin_lock(&dev->power.lock);
661 prepare_to_wait(&dev->power.wait_queue, &wait,
663 if (dev->power.runtime_status != RPM_SUSPENDING)
666 spin_unlock_irq(&dev->power.lock);
670 spin_lock_irq(&dev->power.lock);
672 finish_wait(&dev->power.wait_queue, &wait);
676 if (dev->power.no_callbacks)
681 dev->power.request = (rpmflags & RPM_AUTO) ?
683 if (!dev->power.request_pending) {
684 dev->power.request_pending = true;
685 queue_work(pm_wq, &dev->power.work);
690 __update_runtime_status(dev, RPM_SUSPENDING);
692 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
694 dev_pm_enable_wake_irq_check(dev, true);
695 retval = rpm_callback(callback, dev);
699 dev_pm_enable_wake_irq_complete(dev);
702 __update_runtime_status(dev, RPM_SUSPENDED);
703 pm_runtime_deactivate_timer(dev);
705 if (dev->parent) {
706 parent = dev->parent;
709 wake_up_all(&dev->power.wait_queue);
711 if (dev->power.deferred_resume) {
712 dev->power.deferred_resume = false;
713 rpm_resume(dev, 0);
718 if (dev->power.irq_safe)
723 spin_unlock(&dev->power.lock);
729 spin_lock(&dev->power.lock);
732 if (dev->power.links_count > 0) {
733 spin_unlock_irq(&dev->power.lock);
735 rpm_suspend_suppliers(dev);
737 spin_lock_irq(&dev->power.lock);
741 trace_rpm_return_int(dev, _THIS_IP_, retval);
746 dev_pm_disable_wake_irq_check(dev, true);
747 __update_runtime_status(dev, RPM_ACTIVE);
748 dev->power.deferred_resume = false;
749 wake_up_all(&dev->power.wait_queue);
757 if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
758 pm_runtime_autosuspend_expiration(dev) != 0)
761 pm_runtime_cancel_pending(dev);
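The RPM_AUTO handling above (lines 605-634) only arms the suspend timer when an autosuspend expiration is pending. The caller-side pattern that feeds it is roughly the following (a hedged sketch of a hypothetical call site):

/* After finishing I/O on the device: */
pm_runtime_mark_last_busy(dev);         /* refresh power.last_busy */
pm_runtime_put_autosuspend(dev);        /* drop the usage count, request an RPM_AUTO suspend */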
768 * @dev: Device to resume.
781 * This function must be called under dev->power.lock with interrupts disabled.
783 static int rpm_resume(struct device *dev, int rpmflags)
784 __releases(&dev->power.lock) __acquires(&dev->power.lock)
790 trace_rpm_resume(dev, rpmflags);
793 if (dev->power.runtime_error) {
795 } else if (dev->power.disable_depth > 0) {
796 if (dev->power.runtime_status == RPM_ACTIVE &&
797 dev->power.last_status == RPM_ACTIVE)
811 dev->power.request = RPM_REQ_NONE;
812 if (!dev->power.timer_autosuspends)
813 pm_runtime_deactivate_timer(dev);
815 if (dev->power.runtime_status == RPM_ACTIVE) {
820 if (dev->power.runtime_status == RPM_RESUMING ||
821 dev->power.runtime_status == RPM_SUSPENDING) {
825 if (dev->power.runtime_status == RPM_SUSPENDING) {
826 dev->power.deferred_resume = true;
835 if (dev->power.irq_safe) {
836 spin_unlock(&dev->power.lock);
840 spin_lock(&dev->power.lock);
846 prepare_to_wait(&dev->power.wait_queue, &wait,
848 if (dev->power.runtime_status != RPM_RESUMING &&
849 dev->power.runtime_status != RPM_SUSPENDING)
852 spin_unlock_irq(&dev->power.lock);
856 spin_lock_irq(&dev->power.lock);
858 finish_wait(&dev->power.wait_queue, &wait);
867 if (dev->power.no_callbacks && !parent && dev->parent) {
868 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
869 if (dev->parent->power.disable_depth > 0 ||
870 dev->parent->power.ignore_children ||
871 dev->parent->power.runtime_status == RPM_ACTIVE) {
872 atomic_inc(&dev->parent->power.child_count);
873 spin_unlock(&dev->parent->power.lock);
877 spin_unlock(&dev->parent->power.lock);
882 dev->power.request = RPM_REQ_RESUME;
883 if (!dev->power.request_pending) {
884 dev->power.request_pending = true;
885 queue_work(pm_wq, &dev->power.work);
891 if (!parent && dev->parent) {
894 * necessary. Not needed if dev is irq-safe; then the
897 parent = dev->parent;
898 if (dev->power.irq_safe)
901 spin_unlock(&dev->power.lock);
918 spin_lock(&dev->power.lock);
926 if (dev->power.no_callbacks)
929 __update_runtime_status(dev, RPM_RESUMING);
931 callback = RPM_GET_CALLBACK(dev, runtime_resume);
933 dev_pm_disable_wake_irq_check(dev, false);
934 retval = rpm_callback(callback, dev);
936 __update_runtime_status(dev, RPM_SUSPENDED);
937 pm_runtime_cancel_pending(dev);
938 dev_pm_enable_wake_irq_check(dev, false);
941 __update_runtime_status(dev, RPM_ACTIVE);
942 pm_runtime_mark_last_busy(dev);
946 wake_up_all(&dev->power.wait_queue);
949 rpm_idle(dev, RPM_ASYNC);
952 if (parent && !dev->power.irq_safe) {
953 spin_unlock_irq(&dev->power.lock);
957 spin_lock_irq(&dev->power.lock);
960 trace_rpm_return_int(dev, _THIS_IP_, retval);
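A hedged caller-side sketch of the resume path: pm_runtime_resume_and_get() bumps the usage count and resumes synchronously, which ends up in rpm_resume() shown above.

int ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
        return ret;     /* device could not be resumed */

/* ... the device is RPM_ACTIVE here ... */

pm_runtime_put(dev);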
974 struct device *dev = container_of(work, struct device, power.work);
977 spin_lock_irq(&dev->power.lock);
979 if (!dev->power.request_pending)
982 req = dev->power.request;
983 dev->power.request = RPM_REQ_NONE;
984 dev->power.request_pending = false;
990 rpm_idle(dev, RPM_NOWAIT);
993 rpm_suspend(dev, RPM_NOWAIT);
996 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
999 rpm_resume(dev, RPM_NOWAIT);
1004 spin_unlock_irq(&dev->power.lock);
1015 struct device *dev = container_of(timer, struct device, power.suspend_timer);
1019 spin_lock_irqsave(&dev->power.lock, flags);
1021 expires = dev->power.timer_expires;
1027 dev->power.timer_expires = 0;
1028 rpm_suspend(dev, dev->power.timer_autosuspends ?
1032 spin_unlock_irqrestore(&dev->power.lock, flags);
1039 * @dev: Device to suspend.
1042 int pm_schedule_suspend(struct device *dev, unsigned int delay)
1048 spin_lock_irqsave(&dev->power.lock, flags);
1051 retval = rpm_suspend(dev, RPM_ASYNC);
1055 retval = rpm_check_suspend_allowed(dev);
1060 pm_runtime_cancel_pending(dev);
1063 dev->power.timer_expires = expires;
1064 dev->power.timer_autosuspends = 0;
1065 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1068 spin_unlock_irqrestore(&dev->power.lock, flags);
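pm_schedule_suspend() arms the suspend_timer for a plain (non-autosuspend) delayed suspend. A hedged usage sketch:

/* Ask the core to try suspending the device roughly 100 ms from now. */
int ret = pm_schedule_suspend(dev, 100);
if (ret < 0)
        dev_dbg(dev, "delayed suspend not scheduled: %d\n", ret);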
1074 static int rpm_drop_usage_count(struct device *dev)
1078 ret = atomic_sub_return(1, &dev->power.usage_count);
1088 atomic_inc(&dev->power.usage_count);
1089 dev_warn(dev, "Runtime PM usage count underflow!\n");
1095 * @dev: Device to send idle notification for.
1106 int __pm_runtime_idle(struct device *dev, int rpmflags)
1112 retval = rpm_drop_usage_count(dev);
1116 trace_rpm_usage(dev, rpmflags);
1121 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1123 spin_lock_irqsave(&dev->power.lock, flags);
1124 retval = rpm_idle(dev, rpmflags);
1125 spin_unlock_irqrestore(&dev->power.lock, flags);
1133 * @dev: Device to suspend.
1144 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1150 retval = rpm_drop_usage_count(dev);
1154 trace_rpm_usage(dev, rpmflags);
1159 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1161 spin_lock_irqsave(&dev->power.lock, flags);
1162 retval = rpm_suspend(dev, rpmflags);
1163 spin_unlock_irqrestore(&dev->power.lock, flags);
1171 * @dev: Device to resume.
1180 int __pm_runtime_resume(struct device *dev, int rpmflags)
1185 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1186 dev->power.runtime_status != RPM_ACTIVE);
1189 atomic_inc(&dev->power.usage_count);
1191 spin_lock_irqsave(&dev->power.lock, flags);
1192 retval = rpm_resume(dev, rpmflags);
1193 spin_unlock_irqrestore(&dev->power.lock, flags);
1201 * @dev: Device to handle.
1204 * Return -EINVAL if runtime PM is disabled for @dev.
1207 * is set, or (2) @dev is not ignoring children and its active child count is
1208 * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
1209 * the usage counter of @dev and return 1.
1221 * @dev after this function has returned a positive value for it.
1223 static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
1228 spin_lock_irqsave(&dev->power.lock, flags);
1229 if (dev->power.disable_depth > 0) {
1231 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1233 } else if (ign_usage_count || (!dev->power.ignore_children &&
1234 atomic_read(&dev->power.child_count) > 0)) {
1236 atomic_inc(&dev->power.usage_count);
1238 retval = atomic_inc_not_zero(&dev->power.usage_count);
1240 trace_rpm_usage(dev, 0);
1241 spin_unlock_irqrestore(&dev->power.lock, flags);
1249 * @dev: Target device.
1251 * Increment the runtime PM usage counter of @dev if its runtime PM status is
1256 int pm_runtime_get_if_active(struct device *dev)
1258 return pm_runtime_get_conditional(dev, true);
1264 * @dev: Target device.
1266 * Increment the runtime PM usage counter of @dev if its runtime PM status is
1271 * If @dev is in a different state or it is not in use (that is, its usage
1276 * also the usage counter of @dev is not updated.
1278 int pm_runtime_get_if_in_use(struct device *dev)
1280 return pm_runtime_get_conditional(dev, false);
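pm_runtime_get_if_active() and pm_runtime_get_if_in_use() take a usage reference only if the device is already RPM_ACTIVE (and, for the latter, already in use). A hedged sketch of the common caller pattern:

/* Touch the hardware only if it is already powered up and in use. */
if (pm_runtime_get_if_in_use(dev) <= 0)
        return;         /* suspended, not in use, or runtime PM disabled */

/* ... safe to access registers here ... */

pm_runtime_put(dev);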
1286 * @dev: Device to handle.
1301 * If @dev has any suppliers (as reflected by device links to them), and @status
1303 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1308 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1310 struct device *parent = dev->parent;
1318 spin_lock_irqsave(&dev->power.lock, flags);
1324 if (dev->power.runtime_error || dev->power.disable_depth)
1325 dev->power.disable_depth++;
1329 spin_unlock_irqrestore(&dev->power.lock, flags);
1343 error = rpm_get_suppliers(dev);
1350 spin_lock_irqsave(&dev->power.lock, flags);
1352 if (dev->power.runtime_status == status || !parent)
1369 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1370 dev_name(dev),
1373 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1386 __update_runtime_status(dev, status);
1388 dev->power.runtime_error = 0;
1391 spin_unlock_irqrestore(&dev->power.lock, flags);
1399 rpm_put_suppliers(dev);
1404 pm_runtime_enable(dev);
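__pm_runtime_set_status() underlies pm_runtime_set_active() and pm_runtime_set_suspended(). A hedged sketch of the usual probe-time sequence for a device that comes up already powered (hypothetical call site):

int ret = pm_runtime_set_active(dev);   /* wraps __pm_runtime_set_status(dev, RPM_ACTIVE) */
if (ret)
        return ret;

pm_runtime_enable(dev);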
1412 * @dev: Device to handle.
1417 * Should be called under dev->power.lock with interrupts disabled.
1419 static void __pm_runtime_barrier(struct device *dev)
1421 pm_runtime_deactivate_timer(dev);
1423 if (dev->power.request_pending) {
1424 dev->power.request = RPM_REQ_NONE;
1425 spin_unlock_irq(&dev->power.lock);
1427 cancel_work_sync(&dev->power.work);
1429 spin_lock_irq(&dev->power.lock);
1430 dev->power.request_pending = false;
1433 if (dev->power.runtime_status == RPM_SUSPENDING ||
1434 dev->power.runtime_status == RPM_RESUMING ||
1435 dev->power.idle_notification) {
1440 prepare_to_wait(&dev->power.wait_queue, &wait,
1442 if (dev->power.runtime_status != RPM_SUSPENDING
1443 && dev->power.runtime_status != RPM_RESUMING
1444 && !dev->power.idle_notification)
1446 spin_unlock_irq(&dev->power.lock);
1450 spin_lock_irq(&dev->power.lock);
1452 finish_wait(&dev->power.wait_queue, &wait);
1458 * @dev: Device to handle.
1470 int pm_runtime_barrier(struct device *dev)
1474 pm_runtime_get_noresume(dev);
1475 spin_lock_irq(&dev->power.lock);
1477 if (dev->power.request_pending
1478 && dev->power.request == RPM_REQ_RESUME) {
1479 rpm_resume(dev, 0);
1483 __pm_runtime_barrier(dev);
1485 spin_unlock_irq(&dev->power.lock);
1486 pm_runtime_put_noidle(dev);
1492 bool pm_runtime_block_if_disabled(struct device *dev)
1496 spin_lock_irq(&dev->power.lock);
1498 ret = !pm_runtime_enabled(dev);
1499 if (ret && dev->power.last_status == RPM_INVALID)
1500 dev->power.last_status = RPM_BLOCKED;
1502 spin_unlock_irq(&dev->power.lock);
1507 void pm_runtime_unblock(struct device *dev)
1509 spin_lock_irq(&dev->power.lock);
1511 if (dev->power.last_status == RPM_BLOCKED)
1512 dev->power.last_status = RPM_INVALID;
1514 spin_unlock_irq(&dev->power.lock);
1517 void __pm_runtime_disable(struct device *dev, bool check_resume)
1519 spin_lock_irq(&dev->power.lock);
1521 if (dev->power.disable_depth > 0) {
1522 dev->power.disable_depth++;
1531 if (check_resume && dev->power.request_pending &&
1532 dev->power.request == RPM_REQ_RESUME) {
1537 pm_runtime_get_noresume(dev);
1539 rpm_resume(dev, 0);
1541 pm_runtime_put_noidle(dev);
1545 update_pm_runtime_accounting(dev);
1547 if (!dev->power.disable_depth++) {
1548 __pm_runtime_barrier(dev);
1549 dev->power.last_status = dev->power.runtime_status;
1553 spin_unlock_irq(&dev->power.lock);
1559 * @dev: Device to handle.
1561 void pm_runtime_enable(struct device *dev)
1565 spin_lock_irqsave(&dev->power.lock, flags);
1567 if (!dev->power.disable_depth) {
1568 dev_warn(dev, "Unbalanced %s!\n", __func__);
1572 if (--dev->power.disable_depth > 0)
1575 if (dev->power.last_status == RPM_BLOCKED) {
1576 dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
1579 dev->power.last_status = RPM_INVALID;
1580 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1582 if (dev->power.runtime_status == RPM_SUSPENDED &&
1583 !dev->power.ignore_children &&
1584 atomic_read(&dev->power.child_count) > 0)
1585 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1588 spin_unlock_irqrestore(&dev->power.lock, flags);
1600 * @dev: Device to handle.
1602 int devm_pm_runtime_set_active_enabled(struct device *dev)
1606 err = pm_runtime_set_active(dev);
1610 err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
1614 return devm_pm_runtime_enable(dev);
1630 * @dev: Device to handle.
1632 int devm_pm_runtime_enable(struct device *dev)
1634 pm_runtime_enable(dev);
1636 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
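devm_pm_runtime_enable() pairs pm_runtime_enable() with a devres action so the matching disable happens automatically on driver detach. A hedged probe-path sketch (hypothetical call site):

int ret = devm_pm_runtime_enable(dev);  /* disabled automatically on unbind */
if (ret)
        return ret;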
1648 * @dev: Device to handle.
1650 int devm_pm_runtime_get_noresume(struct device *dev)
1652 pm_runtime_get_noresume(dev);
1654 return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
1660 * @dev: Device to handle.
1666 void pm_runtime_forbid(struct device *dev)
1668 spin_lock_irq(&dev->power.lock);
1669 if (!dev->power.runtime_auto)
1672 dev->power.runtime_auto = false;
1673 atomic_inc(&dev->power.usage_count);
1674 rpm_resume(dev, 0);
1677 spin_unlock_irq(&dev->power.lock);
1683 * @dev: Device to handle.
1687 void pm_runtime_allow(struct device *dev)
1691 spin_lock_irq(&dev->power.lock);
1692 if (dev->power.runtime_auto)
1695 dev->power.runtime_auto = true;
1696 ret = rpm_drop_usage_count(dev);
1698 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1700 trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
1703 spin_unlock_irq(&dev->power.lock);
1709 * @dev: Device to handle.
1715 void pm_runtime_no_callbacks(struct device *dev)
1717 spin_lock_irq(&dev->power.lock);
1718 dev->power.no_callbacks = 1;
1719 spin_unlock_irq(&dev->power.lock);
1720 if (device_is_registered(dev))
1721 rpm_sysfs_remove(dev);
1727 * @dev: Device to handle
1736 void pm_runtime_irq_safe(struct device *dev)
1738 if (dev->parent)
1739 pm_runtime_get_sync(dev->parent);
1741 spin_lock_irq(&dev->power.lock);
1742 dev->power.irq_safe = 1;
1743 spin_unlock_irq(&dev->power.lock);
1749 * @dev: Device to handle.
1756 * This function must be called under dev->power.lock with interrupts disabled.
1758 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1760 int delay = dev->power.autosuspend_delay;
1763 if (dev->power.use_autosuspend && delay < 0) {
1767 atomic_inc(&dev->power.usage_count);
1768 rpm_resume(dev, 0);
1770 trace_rpm_usage(dev, 0);
1779 atomic_dec(&dev->power.usage_count);
1782 rpm_idle(dev, RPM_AUTO);
1788 * @dev: Device to handle.
1795 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1799 spin_lock_irq(&dev->power.lock);
1800 old_delay = dev->power.autosuspend_delay;
1801 old_use = dev->power.use_autosuspend;
1802 dev->power.autosuspend_delay = delay;
1803 update_autosuspend(dev, old_delay, old_use);
1804 spin_unlock_irq(&dev->power.lock);
1810 * @dev: Device to handle.
1816 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1820 spin_lock_irq(&dev->power.lock);
1821 old_delay = dev->power.autosuspend_delay;
1822 old_use = dev->power.use_autosuspend;
1823 dev->power.use_autosuspend = use;
1824 update_autosuspend(dev, old_delay, old_use);
1825 spin_unlock_irq(&dev->power.lock);
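pm_runtime_set_autosuspend_delay() and __pm_runtime_use_autosuspend() adjust the usage count via update_autosuspend() when autosuspend is switched on or off with a negative delay. A hedged sketch of the usual probe-time setup:

/* Hypothetical probe path: stay idle for 2 s before autosuspending. */
pm_runtime_set_autosuspend_delay(dev, 2000);    /* milliseconds */
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);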
1831 * @dev: Device object to initialize.
1833 void pm_runtime_init(struct device *dev)
1835 dev->power.runtime_status = RPM_SUSPENDED;
1836 dev->power.last_status = RPM_INVALID;
1837 dev->power.idle_notification = false;
1839 dev->power.disable_depth = 1;
1840 atomic_set(&dev->power.usage_count, 0);
1842 dev->power.runtime_error = 0;
1844 atomic_set(&dev->power.child_count, 0);
1845 pm_suspend_ignore_children(dev, false);
1846 dev->power.runtime_auto = true;
1848 dev->power.request_pending = false;
1849 dev->power.request = RPM_REQ_NONE;
1850 dev->power.deferred_resume = false;
1851 dev->power.needs_force_resume = false;
1852 INIT_WORK(&dev->power.work, pm_runtime_work);
1854 dev->power.timer_expires = 0;
1855 hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
1858 init_waitqueue_head(&dev->power.wait_queue);
1863 * @dev: Device object to re-initialize.
1865 void pm_runtime_reinit(struct device *dev)
1867 if (!pm_runtime_enabled(dev)) {
1868 if (dev->power.runtime_status == RPM_ACTIVE)
1869 pm_runtime_set_suspended(dev);
1870 if (dev->power.irq_safe) {
1871 spin_lock_irq(&dev->power.lock);
1872 dev->power.irq_safe = 0;
1873 spin_unlock_irq(&dev->power.lock);
1874 if (dev->parent)
1875 pm_runtime_put(dev->parent);
1882 dev->power.needs_force_resume = false;
1887 * @dev: Device object being removed from device hierarchy.
1889 void pm_runtime_remove(struct device *dev)
1891 __pm_runtime_disable(dev, false);
1892 pm_runtime_reinit(dev);
1897 * @dev: Consumer device.
1899 void pm_runtime_get_suppliers(struct device *dev)
1906 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1918 * @dev: Consumer device.
1920 void pm_runtime_put_suppliers(struct device *dev)
1927 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1937 void pm_runtime_new_link(struct device *dev)
1939 spin_lock_irq(&dev->power.lock);
1940 dev->power.links_count++;
1941 spin_unlock_irq(&dev->power.lock);
1944 static void pm_runtime_drop_link_count(struct device *dev)
1946 spin_lock_irq(&dev->power.lock);
1947 WARN_ON(dev->power.links_count == 0);
1948 dev->power.links_count--;
1949 spin_unlock_irq(&dev->power.lock);
1970 static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
1978 if (dev_pm_strict_midlayer_is_set(dev))
1979 return __rpm_get_driver_callback(dev, cb_offset);
1981 return __rpm_get_callback(dev, cb_offset);
1984 #define GET_CALLBACK(dev, callback) \
1985 get_callback(dev, offsetof(struct dev_pm_ops, callback))
1989 * @dev: Device to suspend.
2004 int pm_runtime_force_suspend(struct device *dev)
2009 pm_runtime_disable(dev);
2010 if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
2013 callback = GET_CALLBACK(dev, runtime_suspend);
2015 dev_pm_enable_wake_irq_check(dev, true);
2016 ret = callback ? callback(dev) : 0;
2020 dev_pm_enable_wake_irq_complete(dev);
2031 if (pm_runtime_need_not_resume(dev))
2032 pm_runtime_set_suspended(dev);
2034 dev->power.needs_force_resume = true;
2039 dev_pm_disable_wake_irq_check(dev, true);
2040 pm_runtime_enable(dev);
2049 * @dev: Device to resume.
2066 int pm_runtime_force_resume(struct device *dev)
2071 if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
2072 pm_runtime_status_suspended(dev)))
2075 callback = GET_CALLBACK(dev, runtime_resume);
2077 dev_pm_disable_wake_irq_check(dev, false);
2078 ret = callback ? callback(dev) : 0;
2080 pm_runtime_set_suspended(dev);
2081 dev_pm_enable_wake_irq_check(dev, false);
2085 pm_runtime_mark_last_busy(dev);
2093 dev->power.smart_suspend = false;
2098 dev->power.needs_force_resume = false;
2100 pm_runtime_enable(dev);
2105 bool pm_runtime_need_not_resume(struct device *dev)
2107 return atomic_read(&dev->power.usage_count) <= 1 &&
2108 (atomic_read(&dev->power.child_count) == 0 ||
2109 dev->power.ignore_children);
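pm_runtime_force_suspend() and pm_runtime_force_resume() are intended for drivers whose runtime PM callbacks can double as system suspend/resume handlers. A hedged sketch of wiring them into dev_pm_ops (the foo_* names are hypothetical and stand for the driver's own runtime PM callbacks, not shown here):

static const struct dev_pm_ops foo_pm_ops = {
        SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
        RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};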