1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/main.c - Where the driver meets power management.
4 *
5 * Copyright (c) 2003 Patrick Mochel
6 * Copyright (c) 2003 Open Source Development Lab
7 *
8 * The driver model core calls device_pm_add() when a device is registered.
9 * This will initialize the embedded device_pm_info object in the device
10 * and add it to the list of power-controlled devices. sysfs entries for
11 * controlling device power management will also be added.
12 *
13 * A separate list is used for keeping track of power info, because the power
14 * domain dependencies may differ from the ancestral dependencies that the
15 * subsystem list maintains.
16 */
17
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/thermal.h>
37 #include <linux/timer.h>
38 #include <linux/nmi.h>
39
40 #include "../base.h"
41 #include "power.h"
42
43 typedef int (*pm_callback_t)(struct device *);
44
45 /*
46 * The entries in the dpm_list list are in a depth first order, simply
47 * because children are guaranteed to be discovered after parents, and
48 * are inserted at the back of the list on discovery.
49 *
50 * Since device_pm_add() may be called with a device lock held,
51 * we must never try to acquire a device lock while holding
52 * dpm_list_mutex.
53 */
54
55 LIST_HEAD(dpm_list);
56 static LIST_HEAD(dpm_prepared_list);
57 static LIST_HEAD(dpm_suspended_list);
58 static LIST_HEAD(dpm_late_early_list);
59 static LIST_HEAD(dpm_noirq_list);
60
61 static DEFINE_MUTEX(dpm_list_mtx);
62 static pm_message_t pm_transition;
63
64 static DEFINE_MUTEX(async_wip_mtx);
65 static int async_error;
66
67 /**
68 * pm_hibernate_is_recovering - if recovering from hibernate due to error.
69 *
70 * Used to query if dev_pm_ops.thaw() is called for normal hibernation case or
71 * recovering from some error.
72 *
73 * Return: true for error case, false for normal case.
74 */
pm_hibernate_is_recovering(void)75 bool pm_hibernate_is_recovering(void)
76 {
77 return pm_transition.event == PM_EVENT_RECOVER;
78 }
79 EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
80
pm_verb(int event)81 static const char *pm_verb(int event)
82 {
83 switch (event) {
84 case PM_EVENT_SUSPEND:
85 return "suspend";
86 case PM_EVENT_RESUME:
87 return "resume";
88 case PM_EVENT_FREEZE:
89 return "freeze";
90 case PM_EVENT_QUIESCE:
91 return "quiesce";
92 case PM_EVENT_HIBERNATE:
93 return "hibernate";
94 case PM_EVENT_THAW:
95 return "thaw";
96 case PM_EVENT_RESTORE:
97 return "restore";
98 case PM_EVENT_RECOVER:
99 return "recover";
100 case PM_EVENT_POWEROFF:
101 return "poweroff";
102 default:
103 return "(unknown PM event)";
104 }
105 }
106
107 /**
108 * device_pm_sleep_init - Initialize system suspend-related device fields.
109 * @dev: Device object being initialized.
110 */
device_pm_sleep_init(struct device * dev)111 void device_pm_sleep_init(struct device *dev)
112 {
113 dev->power.is_prepared = false;
114 dev->power.is_suspended = false;
115 dev->power.is_noirq_suspended = false;
116 dev->power.is_late_suspended = false;
117 init_completion(&dev->power.completion);
118 complete_all(&dev->power.completion);
119 dev->power.wakeup = NULL;
120 INIT_LIST_HEAD(&dev->power.entry);
121 }
122
123 /**
124 * device_pm_lock - Lock the list of active devices used by the PM core.
125 */
device_pm_lock(void)126 void device_pm_lock(void)
127 {
128 mutex_lock(&dpm_list_mtx);
129 }
130
131 /**
132 * device_pm_unlock - Unlock the list of active devices used by the PM core.
133 */
device_pm_unlock(void)134 void device_pm_unlock(void)
135 {
136 mutex_unlock(&dpm_list_mtx);
137 }
138
139 /**
140 * device_pm_add - Add a device to the PM core's list of active devices.
141 * @dev: Device to add to the list.
142 */
device_pm_add(struct device * dev)143 void device_pm_add(struct device *dev)
144 {
145 /* Skip PM setup/initialization. */
146 if (device_pm_not_required(dev))
147 return;
148
149 pr_debug("Adding info for %s:%s\n",
150 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
151 device_pm_check_callbacks(dev);
152 mutex_lock(&dpm_list_mtx);
153 if (dev->parent && dev->parent->power.is_prepared)
154 dev_warn(dev, "parent %s should not be sleeping\n",
155 dev_name(dev->parent));
156 list_add_tail(&dev->power.entry, &dpm_list);
157 dev->power.in_dpm_list = true;
158 mutex_unlock(&dpm_list_mtx);
159 }
160
161 /**
162 * device_pm_remove - Remove a device from the PM core's list of active devices.
163 * @dev: Device to be removed from the list.
164 */
device_pm_remove(struct device * dev)165 void device_pm_remove(struct device *dev)
166 {
167 if (device_pm_not_required(dev))
168 return;
169
170 pr_debug("Removing info for %s:%s\n",
171 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
172 complete_all(&dev->power.completion);
173 mutex_lock(&dpm_list_mtx);
174 list_del_init(&dev->power.entry);
175 dev->power.in_dpm_list = false;
176 mutex_unlock(&dpm_list_mtx);
177 device_wakeup_disable(dev);
178 pm_runtime_remove(dev);
179 device_pm_check_callbacks(dev);
180 }
181
182 /**
183 * device_pm_move_before - Move device in the PM core's list of active devices.
184 * @deva: Device to move in dpm_list.
185 * @devb: Device @deva should come before.
186 */
device_pm_move_before(struct device * deva,struct device * devb)187 void device_pm_move_before(struct device *deva, struct device *devb)
188 {
189 pr_debug("Moving %s:%s before %s:%s\n",
190 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192 /* Delete deva from dpm_list and reinsert before devb. */
193 list_move_tail(&deva->power.entry, &devb->power.entry);
194 }
195
196 /**
197 * device_pm_move_after - Move device in the PM core's list of active devices.
198 * @deva: Device to move in dpm_list.
199 * @devb: Device @deva should come after.
200 */
device_pm_move_after(struct device * deva,struct device * devb)201 void device_pm_move_after(struct device *deva, struct device *devb)
202 {
203 pr_debug("Moving %s:%s after %s:%s\n",
204 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
205 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
206 /* Delete deva from dpm_list and reinsert after devb. */
207 list_move(&deva->power.entry, &devb->power.entry);
208 }
209
210 /**
211 * device_pm_move_last - Move device to end of the PM core's list of devices.
212 * @dev: Device to move in dpm_list.
213 */
device_pm_move_last(struct device * dev)214 void device_pm_move_last(struct device *dev)
215 {
216 pr_debug("Moving %s:%s to end of list\n",
217 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
218 list_move_tail(&dev->power.entry, &dpm_list);
219 }
220
initcall_debug_start(struct device * dev,void * cb)221 static ktime_t initcall_debug_start(struct device *dev, void *cb)
222 {
223 if (!pm_print_times_enabled)
224 return 0;
225
226 dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
227 task_pid_nr(current),
228 dev->parent ? dev_name(dev->parent) : "none");
229 return ktime_get();
230 }
231
/*
 * Log the return value and duration of a PM callback started at @calltime.
 * A no-op unless pm_print_times is enabled.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	if (!pm_print_times_enabled)
		return;

	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(ktime_get(), calltime));
}
244
245 /**
246 * dpm_wait - Wait for a PM operation to complete.
247 * @dev: Device to wait for.
248 * @async: If unset, wait only if the device's power.async_suspend flag is set.
249 */
dpm_wait(struct device * dev,bool async)250 static void dpm_wait(struct device *dev, bool async)
251 {
252 if (!dev)
253 return;
254
255 if (async || (pm_async_enabled && dev->power.async_suspend))
256 wait_for_completion(&dev->power.completion);
257 }
258
dpm_wait_fn(struct device * dev,void * async_ptr)259 static int dpm_wait_fn(struct device *dev, void *async_ptr)
260 {
261 dpm_wait(dev, *((bool *)async_ptr));
262 return 0;
263 }
264
/* Wait for every child of @dev to finish its in-flight PM operation. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
269
/* Wait for all of @dev's managed suppliers to finish their PM operations. */
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	dev_for_each_link_to_supplier(link, dev) {
		if (READ_ONCE(link->status) == DL_STATE_DORMANT)
			continue;
		if (device_link_flag_is_sync_state_only(link->flags))
			continue;

		dpm_wait(link->supplier, async);
	}

	device_links_read_unlock(idx);
}
291
/*
 * Wait for the parent and suppliers of @dev to complete their PM operations.
 *
 * Returns false if @dev has been deleted in the meantime, in which case the
 * caller must not touch it any further.
 */
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	/* get_device(NULL) is fine, so no need to check for a parent here. */
	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}
325
/* Wait for all of @dev's managed consumers to finish their PM operations. */
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	dev_for_each_link_to_consumer(link, dev) {
		if (READ_ONCE(link->status) == DL_STATE_DORMANT)
			continue;
		if (device_link_flag_is_sync_state_only(link->flags))
			continue;

		dpm_wait(link->consumer, async);
	}

	device_links_read_unlock(idx);
}
349
/*
 * Wait for everything that depends on @dev (children and consumers) to finish
 * its PM operation before @dev itself is suspended.
 */
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
355
356 /**
357 * pm_op - Return the PM operation appropriate for given PM event.
358 * @ops: PM operations to choose from.
359 * @state: PM transition of the system being carried out.
360 */
pm_op(const struct dev_pm_ops * ops,pm_message_t state)361 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
362 {
363 switch (state.event) {
364 #ifdef CONFIG_SUSPEND
365 case PM_EVENT_SUSPEND:
366 return ops->suspend;
367 case PM_EVENT_RESUME:
368 return ops->resume;
369 #endif /* CONFIG_SUSPEND */
370 #ifdef CONFIG_HIBERNATE_CALLBACKS
371 case PM_EVENT_FREEZE:
372 case PM_EVENT_QUIESCE:
373 return ops->freeze;
374 case PM_EVENT_POWEROFF:
375 case PM_EVENT_HIBERNATE:
376 return ops->poweroff;
377 case PM_EVENT_THAW:
378 case PM_EVENT_RECOVER:
379 return ops->thaw;
380 case PM_EVENT_RESTORE:
381 return ops->restore;
382 #endif /* CONFIG_HIBERNATE_CALLBACKS */
383 }
384
385 return NULL;
386 }
387
388 /**
389 * pm_late_early_op - Return the PM operation appropriate for given PM event.
390 * @ops: PM operations to choose from.
391 * @state: PM transition of the system being carried out.
392 *
393 * Runtime PM is disabled for @dev while this function is being executed.
394 */
pm_late_early_op(const struct dev_pm_ops * ops,pm_message_t state)395 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
396 pm_message_t state)
397 {
398 switch (state.event) {
399 #ifdef CONFIG_SUSPEND
400 case PM_EVENT_SUSPEND:
401 return ops->suspend_late;
402 case PM_EVENT_RESUME:
403 return ops->resume_early;
404 #endif /* CONFIG_SUSPEND */
405 #ifdef CONFIG_HIBERNATE_CALLBACKS
406 case PM_EVENT_FREEZE:
407 case PM_EVENT_QUIESCE:
408 return ops->freeze_late;
409 case PM_EVENT_POWEROFF:
410 case PM_EVENT_HIBERNATE:
411 return ops->poweroff_late;
412 case PM_EVENT_THAW:
413 case PM_EVENT_RECOVER:
414 return ops->thaw_early;
415 case PM_EVENT_RESTORE:
416 return ops->restore_early;
417 #endif /* CONFIG_HIBERNATE_CALLBACKS */
418 }
419
420 return NULL;
421 }
422
423 /**
424 * pm_noirq_op - Return the PM operation appropriate for given PM event.
425 * @ops: PM operations to choose from.
426 * @state: PM transition of the system being carried out.
427 *
428 * The driver of @dev will not receive interrupts while this function is being
429 * executed.
430 */
pm_noirq_op(const struct dev_pm_ops * ops,pm_message_t state)431 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
432 {
433 switch (state.event) {
434 #ifdef CONFIG_SUSPEND
435 case PM_EVENT_SUSPEND:
436 return ops->suspend_noirq;
437 case PM_EVENT_RESUME:
438 return ops->resume_noirq;
439 #endif /* CONFIG_SUSPEND */
440 #ifdef CONFIG_HIBERNATE_CALLBACKS
441 case PM_EVENT_FREEZE:
442 case PM_EVENT_QUIESCE:
443 return ops->freeze_noirq;
444 case PM_EVENT_POWEROFF:
445 case PM_EVENT_HIBERNATE:
446 return ops->poweroff_noirq;
447 case PM_EVENT_THAW:
448 case PM_EVENT_RECOVER:
449 return ops->thaw_noirq;
450 case PM_EVENT_RESTORE:
451 return ops->restore_noirq;
452 #endif /* CONFIG_HIBERNATE_CALLBACKS */
453 }
454
455 return NULL;
456 }
457
/* Emit a debug line describing the PM callback about to run for @dev. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}
464
/* Report a PM callback failure for @dev, including the transition verb. */
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}
471
dpm_show_time(ktime_t starttime,pm_message_t state,int error,const char * info)472 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
473 const char *info)
474 {
475 ktime_t calltime;
476 u64 usecs64;
477 int usecs;
478
479 calltime = ktime_get();
480 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
481 do_div(usecs64, NSEC_PER_USEC);
482 usecs = usecs64;
483 if (usecs == 0)
484 usecs = 1;
485
486 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
487 info ?: "", info ? " " : "", pm_verb(state.event),
488 error ? "aborted" : "complete",
489 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
490 }
491
/*
 * Invoke a single PM callback @cb for @dev, with tracing and optional
 * initcall-style timing output around it.
 *
 * Returns 0 when @cb is NULL (nothing to do), otherwise @cb's return value.
 */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	/* The start/end trace events must bracket the callback exactly. */
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
513
514 #ifdef CONFIG_DPM_WATCHDOG
/* Per-callback watchdog state; lives on the stack of the suspending task. */
struct dpm_watchdog {
	struct device *dev;	/* Device whose callback is being watched. */
	struct task_struct *tsk;	/* Task executing the callback. */
	struct timer_list timer;	/* Fires when the callback times out. */
	bool fatal;	/* True once the next expiry should panic. */
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
		 "Backtrace all CPUs on DPM watchdog timeout");
529
530 /**
531 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
532 * @t: The timer that PM watchdog depends on.
533 *
534 * Called when a driver has timed out suspending or resuming.
535 * There's not much we can do here to recover so panic() to
536 * capture a crash-dump in pstore.
537 */
dpm_watchdog_handler(struct timer_list * t)538 static void dpm_watchdog_handler(struct timer_list *t)
539 {
540 struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
541 struct timer_list *timer = &wd->timer;
542 unsigned int time_left;
543
544 if (wd->fatal) {
545 unsigned int this_cpu = smp_processor_id();
546
547 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
548 show_stack(wd->tsk, NULL, KERN_EMERG);
549 if (dpm_watchdog_all_cpu_backtrace)
550 trigger_allbutcpu_cpu_backtrace(this_cpu);
551 panic("%s %s: unrecoverable failure\n",
552 dev_driver_string(wd->dev), dev_name(wd->dev));
553 }
554
555 time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
556 dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
557 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
558 show_stack(wd->tsk, NULL, KERN_WARNING);
559
560 wd->fatal = true;
561 mod_timer(timer, jiffies + HZ * time_left);
562 }
563
564 /**
565 * dpm_watchdog_set - Enable pm watchdog for given device.
566 * @wd: Watchdog. Must be allocated on the stack.
567 * @dev: Device to handle.
568 */
dpm_watchdog_set(struct dpm_watchdog * wd,struct device * dev)569 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
570 {
571 struct timer_list *timer = &wd->timer;
572
573 wd->dev = dev;
574 wd->tsk = current;
575 wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
576
577 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
578 /* use same timeout value for both suspend and resume */
579 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
580 add_timer(timer);
581 }
582
583 /**
584 * dpm_watchdog_clear - Disable suspend/resume watchdog.
585 * @wd: Watchdog to disable.
586 */
dpm_watchdog_clear(struct dpm_watchdog * wd)587 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
588 {
589 struct timer_list *timer = &wd->timer;
590
591 timer_delete_sync(timer);
592 timer_destroy_on_stack(timer);
593 }
594 #else
595 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
596 #define dpm_watchdog_set(x, y)
597 #define dpm_watchdog_clear(x)
598 #endif
599
600 /*------------------------- Resume routines -------------------------*/
601
602 /**
603 * dev_pm_skip_resume - System-wide device resume optimization check.
604 * @dev: Target device.
605 *
606 * Return:
607 * - %false if the transition under way is RESTORE.
608 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
609 * - The logical negation of %power.must_resume otherwise (that is, when the
610 * transition under way is RESUME).
611 */
dev_pm_skip_resume(struct device * dev)612 bool dev_pm_skip_resume(struct device *dev)
613 {
614 if (pm_transition.event == PM_EVENT_RESTORE)
615 return false;
616
617 if (pm_transition.event == PM_EVENT_THAW)
618 return dev_pm_skip_suspend(dev);
619
620 return !dev->power.must_resume;
621 }
622
is_async(struct device * dev)623 static bool is_async(struct device *dev)
624 {
625 return dev->power.async_suspend && pm_async_enabled
626 && !pm_trace_is_enabled();
627 }
628
/*
 * Schedule @func to run asynchronously for @dev, if appropriate.
 *
 * Returns true if async processing of @dev is (already or now) in progress,
 * false if the device must be handled synchronously.  Caller must hold
 * async_wip_mtx (see dpm_async_fn()/dpm_async_with_cleanup()).
 */
static bool __dpm_async(struct device *dev, async_func_t func)
{
	if (dev->power.work_in_progress)
		return true;

	if (!is_async(dev))
		return false;

	dev->power.work_in_progress = true;

	/* The reference is dropped by the async callback once it is done. */
	get_device(dev);

	if (async_schedule_dev_nocall(func, dev))
		return true;

	/* Scheduling failed; drop the reference taken above. */
	put_device(dev);

	return false;
}
648
/* Locked wrapper around __dpm_async(); see that function for semantics. */
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	guard(mutex)(&async_wip_mtx);

	return __dpm_async(dev, func);
}
655
/*
 * Try to start async processing of @dev and clear its work-in-progress flag
 * on failure, so the device can still be handled synchronously later.
 * Always returns 0 so it can be used as a device_for_each_child() callback.
 */
static int dpm_async_with_cleanup(struct device *dev, void *fn)
{
	guard(mutex)(&async_wip_mtx);

	if (!__dpm_async(dev, fn))
		dev->power.work_in_progress = false;

	return 0;
}
665
/* Kick off async resume processing of all of @dev's children via @func. */
static void dpm_async_resume_children(struct device *dev, async_func_t func)
{
	/*
	 * Prevent racing with dpm_clear_async_state() during initial list
	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
	 * dpm_resume().
	 */
	guard(mutex)(&dpm_list_mtx);

	/*
	 * Start processing "async" children of the device unless it's been
	 * started already for them.
	 */
	device_for_each_child(dev, func, dpm_async_with_cleanup);
}
681
/*
 * Kick off async resume processing of everything that depends on @dev:
 * its children and its managed consumers.
 */
static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
{
	struct device_link *link;
	int idx;

	dpm_async_resume_children(dev, func);

	idx = device_links_read_lock();

	/* Start processing the device's "async" consumers. */
	dev_for_each_link_to_consumer(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_async_with_cleanup(link->consumer, func);

	device_links_read_unlock(idx);
}
698
/* Reset @dev's async PM bookkeeping at the start of a new phase. */
static void dpm_clear_async_state(struct device *dev)
{
	reinit_completion(&dev->power.completion);
	dev->power.work_in_progress = false;
}
704
/* Is @dev a "root" for PM purposes: no parent and no suppliers? */
static bool dpm_root_device(struct device *dev)
{
	lockdep_assert_held(&dpm_list_mtx);

	/*
	 * Since this function is required to run under dpm_list_mtx, the
	 * list_empty() below will only return true if the device's list of
	 * consumers is actually empty before calling it.
	 */
	return !dev->parent && list_empty(&dev->links.suppliers);
}
716
717 static void async_resume_noirq(void *data, async_cookie_t cookie);
718
719 /**
720 * device_resume_noirq - Execute a "noirq resume" callback for given device.
721 * @dev: Device to handle.
722 * @state: PM transition of the system being carried out.
723 * @async: If true, the device is being resumed asynchronously.
724 *
725 * The driver of @dev will not receive interrupts while this function is being
726 * executed.
727 */
device_resume_noirq(struct device * dev,pm_message_t state,bool async)728 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
729 {
730 pm_callback_t callback = NULL;
731 const char *info = NULL;
732 bool skip_resume;
733 int error = 0;
734
735 TRACE_DEVICE(dev);
736 TRACE_RESUME(0);
737
738 if (dev->power.syscore || dev->power.direct_complete)
739 goto Out;
740
741 if (!dev->power.is_noirq_suspended) {
742 /*
743 * This means that system suspend has been aborted in the noirq
744 * phase before invoking the noirq suspend callback for the
745 * device, so if device_suspend_late() has left it in suspend,
746 * device_resume_early() should leave it in suspend either in
747 * case the early resume of it depends on the noirq resume that
748 * has not run.
749 */
750 if (dev_pm_skip_suspend(dev))
751 dev->power.must_resume = false;
752
753 goto Out;
754 }
755
756 if (!dpm_wait_for_superior(dev, async))
757 goto Out;
758
759 skip_resume = dev_pm_skip_resume(dev);
760 /*
761 * If the driver callback is skipped below or by the middle layer
762 * callback and device_resume_early() also skips the driver callback for
763 * this device later, it needs to appear as "suspended" to PM-runtime,
764 * so change its status accordingly.
765 *
766 * Otherwise, the device is going to be resumed, so set its PM-runtime
767 * status to "active" unless its power.smart_suspend flag is clear, in
768 * which case it is not necessary to update its PM-runtime status.
769 */
770 if (skip_resume)
771 pm_runtime_set_suspended(dev);
772 else if (dev_pm_smart_suspend(dev))
773 pm_runtime_set_active(dev);
774
775 if (dev->pm_domain) {
776 info = "noirq power domain ";
777 callback = pm_noirq_op(&dev->pm_domain->ops, state);
778 } else if (dev->type && dev->type->pm) {
779 info = "noirq type ";
780 callback = pm_noirq_op(dev->type->pm, state);
781 } else if (dev->class && dev->class->pm) {
782 info = "noirq class ";
783 callback = pm_noirq_op(dev->class->pm, state);
784 } else if (dev->bus && dev->bus->pm) {
785 info = "noirq bus ";
786 callback = pm_noirq_op(dev->bus->pm, state);
787 }
788 if (callback)
789 goto Run;
790
791 if (skip_resume)
792 goto Skip;
793
794 if (dev->driver && dev->driver->pm) {
795 info = "noirq driver ";
796 callback = pm_noirq_op(dev->driver->pm, state);
797 }
798
799 Run:
800 error = dpm_run_callback(callback, dev, state, info);
801
802 Skip:
803 dev->power.is_noirq_suspended = false;
804
805 Out:
806 complete_all(&dev->power.completion);
807 TRACE_RESUME(error);
808
809 if (error) {
810 WRITE_ONCE(async_error, error);
811 dpm_save_failed_dev(dev_name(dev));
812 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
813 }
814
815 dpm_async_resume_subordinate(dev, async_resume_noirq);
816 }
817
/* Async entry point: run the noirq resume for @data and drop its reference. */
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}
825
/*
 * Run the "noirq" resume phase for every device in dpm_noirq_list, moving
 * each one to dpm_late_early_list as it is processed.  "Async" devices are
 * handled in parallel; the rest are resumed synchronously in list order.
 */
static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" root devices upfront so they don't wait for
	 * the "sync" devices they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume_noirq);
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		if (!dpm_async_fn(dev, async_resume_noirq)) {
			get_device(dev);

			/* Drop the list lock while running the callback. */
			mutex_unlock(&dpm_list_mtx);

			device_resume_noirq(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async resumes scheduled above to finish. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	if (READ_ONCE(async_error))
		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
872
873 /**
874 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
875 * @state: PM transition of the system being carried out.
876 *
877 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
878 * allow device drivers' interrupt handlers to be called.
879 */
dpm_resume_noirq(pm_message_t state)880 void dpm_resume_noirq(pm_message_t state)
881 {
882 dpm_noirq_resume_devices(state);
883
884 resume_device_irqs();
885 device_wakeup_disarm_wake_irqs();
886 }
887
888 static void async_resume_early(void *data, async_cookie_t cookie);
889
890 /**
891 * device_resume_early - Execute an "early resume" callback for given device.
892 * @dev: Device to handle.
893 * @state: PM transition of the system being carried out.
894 * @async: If true, the device is being resumed asynchronously.
895 *
896 * Runtime PM is disabled for @dev while this function is being executed.
897 */
device_resume_early(struct device * dev,pm_message_t state,bool async)898 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
899 {
900 pm_callback_t callback = NULL;
901 const char *info = NULL;
902 int error = 0;
903
904 TRACE_DEVICE(dev);
905 TRACE_RESUME(0);
906
907 if (dev->power.direct_complete)
908 goto Out;
909
910 if (!dev->power.is_late_suspended)
911 goto Out;
912
913 if (dev->power.syscore)
914 goto Skip;
915
916 if (!dpm_wait_for_superior(dev, async))
917 goto Out;
918
919 if (dev->pm_domain) {
920 info = "early power domain ";
921 callback = pm_late_early_op(&dev->pm_domain->ops, state);
922 } else if (dev->type && dev->type->pm) {
923 info = "early type ";
924 callback = pm_late_early_op(dev->type->pm, state);
925 } else if (dev->class && dev->class->pm) {
926 info = "early class ";
927 callback = pm_late_early_op(dev->class->pm, state);
928 } else if (dev->bus && dev->bus->pm) {
929 info = "early bus ";
930 callback = pm_late_early_op(dev->bus->pm, state);
931 }
932 if (callback)
933 goto Run;
934
935 if (dev_pm_skip_resume(dev))
936 goto Skip;
937
938 if (dev->driver && dev->driver->pm) {
939 info = "early driver ";
940 callback = pm_late_early_op(dev->driver->pm, state);
941 }
942
943 Run:
944 error = dpm_run_callback(callback, dev, state, info);
945
946 Skip:
947 dev->power.is_late_suspended = false;
948 pm_runtime_enable(dev);
949
950 Out:
951 TRACE_RESUME(error);
952
953 complete_all(&dev->power.completion);
954
955 if (error) {
956 WRITE_ONCE(async_error, error);
957 dpm_save_failed_dev(dev_name(dev));
958 pm_dev_err(dev, state, async ? " async early" : " early", error);
959 }
960
961 dpm_async_resume_subordinate(dev, async_resume_early);
962 }
963
/* Async entry point: run the early resume for @data and drop its reference. */
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_early(dev, pm_transition, true);
	put_device(dev);
}
971
972 /**
973 * dpm_resume_early - Execute "early resume" callbacks for all devices.
974 * @state: PM transition of the system being carried out.
975 */
dpm_resume_early(pm_message_t state)976 void dpm_resume_early(pm_message_t state)
977 {
978 struct device *dev;
979 ktime_t starttime = ktime_get();
980
981 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
982
983 async_error = 0;
984 pm_transition = state;
985
986 mutex_lock(&dpm_list_mtx);
987
988 /*
989 * Start processing "async" root devices upfront so they don't wait for
990 * the "sync" devices they don't depend on.
991 */
992 list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
993 dpm_clear_async_state(dev);
994 if (dpm_root_device(dev))
995 dpm_async_with_cleanup(dev, async_resume_early);
996 }
997
998 while (!list_empty(&dpm_late_early_list)) {
999 dev = to_device(dpm_late_early_list.next);
1000 list_move_tail(&dev->power.entry, &dpm_suspended_list);
1001
1002 if (!dpm_async_fn(dev, async_resume_early)) {
1003 get_device(dev);
1004
1005 mutex_unlock(&dpm_list_mtx);
1006
1007 device_resume_early(dev, state, false);
1008
1009 put_device(dev);
1010
1011 mutex_lock(&dpm_list_mtx);
1012 }
1013 }
1014 mutex_unlock(&dpm_list_mtx);
1015 async_synchronize_full();
1016 dpm_show_time(starttime, state, 0, "early");
1017 if (READ_ONCE(async_error))
1018 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
1019
1020 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
1021 }
1022
1023 /**
1024 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
1025 * @state: PM transition of the system being carried out.
1026 */
void dpm_resume_start(pm_message_t state)
{
	/* Undo dpm_suspend_end(): "noirq" resume first, then "early" resume. */
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
1033
1034 static void async_resume(void *data, async_cookie_t cookie);
1035
1036 /**
1037 * device_resume - Execute "resume" callbacks for given device.
1038 * @dev: Device to handle.
1039 * @state: PM transition of the system being carried out.
1040 * @async: If true, the device is being resumed asynchronously.
1041 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Syscore devices are not handled by the dpm machinery. */
	if (dev->power.syscore)
		goto Complete;

	/* Nothing to undo if the device was not suspended by dpm_suspend(). */
	if (!dev->power.is_suspended)
		goto Complete;

	dev->power.is_suspended = false;

	if (dev->power.direct_complete) {
		/*
		 * Allow new children to be added under the device after this
		 * point if it has no PM callbacks.
		 */
		if (dev->power.no_pm_callbacks)
			dev->power.is_prepared = false;

		/* Match the pm_runtime_disable() in device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	/* Wait for the device's parent and suppliers to be resumed first. */
	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	/*
	 * Callback lookup order: PM domain, device type, class, bus, with the
	 * driver callback as a fallback.  A legacy bus ->resume callback is
	 * used directly and skips the driver fallback.
	 */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	/* Let any waiters (children, consumers) make progress. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}

	/* Kick off "async" processing of the device's children/consumers. */
	dpm_async_resume_subordinate(dev, async_resume);
}
1139
/*
 * Async work wrapper: run the "resume" phase for one device and drop the
 * reference that was taken on it when this work item was scheduled.
 */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *target = data;

	device_resume(target, pm_transition, true);
	put_device(target);
}
1147
1148 /**
1149 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1150 * @state: PM transition of the system being carried out.
1151 *
1152 * Execute the appropriate "resume" callback for all devices whose status
1153 * indicates that they are suspended.
1154 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);

	/* Reset the shared transition/error state for this resume phase. */
	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" root devices upfront so they don't wait for
	 * the "sync" devices they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume);
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		list_move_tail(&dev->power.entry, &dpm_prepared_list);

		if (!dpm_async_fn(dev, async_resume)) {
			/* Not handled asynchronously: resume it right here. */
			get_device(dev);

			/* Device callbacks must not run under dpm_list_mtx. */
			mutex_unlock(&dpm_list_mtx);

			device_resume(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all outstanding async resume work items to complete. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);
	if (READ_ONCE(async_error))
		dpm_save_failed_step(SUSPEND_RESUME);

	/* Devices are functional again; restart cpufreq and devfreq. */
	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
1203
1204 /**
1205 * device_complete - Complete a PM transition for given device.
1206 * @dev: Device to handle.
1207 * @state: PM transition of the system being carried out.
1208 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	/* Syscore devices are not handled by the dpm machinery. */
	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	/*
	 * Callback lookup order: PM domain, device type, class, bus, with the
	 * driver callback as a fallback if none of them provides one.
	 */
	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	/* If enabling runtime PM for the device is blocked, unblock it. */
	pm_runtime_unblock(dev);
	/* Balance the pm_runtime_get_noresume() in device_prepare(). */
	pm_runtime_put(dev);
}
1250
1251 /**
1252 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1253 * @state: PM transition of the system being carried out.
1254 *
1255 * Execute the ->complete() callbacks for all devices whose PM status is not
1256 * DPM_ON (this allows new devices to be registered).
1257 */
void dpm_complete(pm_message_t state)
{
	/* Temporary list so completed devices can be spliced back at the end. */
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_prepared_list from the tail (reverse of prepare order). */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		/* Device callbacks must not run under dpm_list_mtx. */
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	/* Put the completed devices back onto the main dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Start resuming thermal control */
	thermal_pm_complete();
	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
1292
1293 /**
1294 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1295 * @state: PM transition of the system being carried out.
1296 *
1297 * Execute "resume" callbacks for all devices and complete the PM transition of
1298 * the system.
1299 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	/* Restore normal GFP behavior before running ->complete() callbacks. */
	pm_restore_gfp_mask();
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
1307
1308
1309 /*------------------------- Suspend routines -------------------------*/
1310
dpm_leaf_device(struct device * dev)1311 static bool dpm_leaf_device(struct device *dev)
1312 {
1313 struct device *child;
1314
1315 lockdep_assert_held(&dpm_list_mtx);
1316
1317 child = device_find_any_child(dev);
1318 if (child) {
1319 put_device(child);
1320
1321 return false;
1322 }
1323
1324 /*
1325 * Since this function is required to run under dpm_list_mtx, the
1326 * list_empty() below will only return true if the device's list of
1327 * consumers is actually empty before calling it.
1328 */
1329 return list_empty(&dev->links.consumers);
1330 }
1331
/*
 * Schedule "async" processing of @dev's parent via @func, if the device is
 * still registered.  Returns false if @dev has already been deleted, in
 * which case the caller must not touch its links either.
 */
static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
{
	guard(mutex)(&dpm_list_mtx);

	/*
	 * If the device is suspended asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by checking
	 * if the device has been deleted already as the parent cannot be
	 * deleted before it.
	 */
	if (!device_pm_initialized(dev))
		return false;

	/* Start processing the device's parent if it is "async". */
	if (dev->parent)
		dpm_async_with_cleanup(dev->parent, func);

	return true;
}
1352
/*
 * Kick off "async" processing (via @func) of the devices that must be
 * suspended after @dev: its parent and its non-dormant suppliers.
 */
static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
{
	struct device_link *link;
	int idx;

	/* Bail out if the device has been deleted already. */
	if (!dpm_async_suspend_parent(dev, func))
		return;

	idx = device_links_read_lock();

	/* Start processing the device's "async" suppliers. */
	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_async_with_cleanup(link->supplier, func);

	device_links_read_unlock(idx);
}
1370
/*
 * On a suspend-phase error, complete the power.completion of every device
 * on @device_list whose async processing has not started, so that anybody
 * waiting for those devices does not block forever.
 */
static void dpm_async_suspend_complete_all(struct list_head *device_list)
{
	struct device *dev;

	guard(mutex)(&async_wip_mtx);

	list_for_each_entry_reverse(dev, device_list, power.entry) {
		/*
		 * In case the device is being waited for and async processing
		 * has not started for it yet, let the waiters make progress.
		 */
		if (!dev->power.work_in_progress)
			complete_all(&dev->power.completion);
	}
}
1386
1387 /**
1388 * resume_event - Return a "resume" message for given "suspend" sleep state.
1389 * @sleep_state: PM message representing a sleep state.
1390 *
1391 * Return a PM message representing the resume event corresponding to given
1392 * sleep state.
1393 */
/*
 * Map a "suspend"-side sleep message onto the matching "resume"-side
 * message: SUSPEND -> RESUME, FREEZE/QUIESCE -> RECOVER,
 * HIBERNATE -> RESTORE, anything else -> ON.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	int event = sleep_state.event;

	if (event == PM_EVENT_SUSPEND)
		return PMSG_RESUME;

	if (event == PM_EVENT_FREEZE || event == PM_EVENT_QUIESCE)
		return PMSG_RECOVER;

	if (event == PM_EVENT_HIBERNATE)
		return PMSG_RESTORE;

	return PMSG_ON;
}
1407
/*
 * Mark @dev's parent and all of its suppliers as having to be resumed,
 * because @dev itself must be resumed and depends on them.
 */
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}
1423
1424 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1425
1426 /**
1427 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1428 * @dev: Device to handle.
1429 * @state: PM transition of the system being carried out.
1430 * @async: If true, the device is being suspended asynchronously.
1431 *
1432 * The driver of @dev will not receive interrupts while this function is being
1433 * executed.
1434 */
static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	/* Children and consumers must be suspended before this device. */
	dpm_wait_for_subordinate(dev, async);

	/* Bail out early if another device has already failed to suspend. */
	if (READ_ONCE(async_error))
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	/* Callback lookup order: PM domain, device type, class, bus. */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	/* "Smart suspend": skip the callbacks entirely if allowed. */
	if (dev_pm_skip_suspend(dev))
		goto Skip;

	/* Fall back to the driver's own callback. */
	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

 Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
		goto Complete;
	}

 Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Devices must be resumed unless they are explicitly allowed to be left
	 * in suspend, but even in that case skipping the resume of devices that
	 * were in use right before the system suspend (as indicated by their
	 * runtime PM usage counters and child counters) would be suboptimal.
	 */
	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
		dev->power.must_resume = true;

	/* A device that must be resumed drags its superiors along with it. */
	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

 Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);

	/* Do not schedule more work after any suspend error. */
	if (error || READ_ONCE(async_error))
		return;

	dpm_async_suspend_superior(dev, async_suspend_noirq);
}
1510
/*
 * Async work wrapper: run the "noirq suspend" phase for one device and drop
 * the reference that was taken on it when this work item was scheduled.
 */
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *target = data;

	device_suspend_noirq(target, pm_transition, true);
	put_device(target);
}
1518
static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	struct device *dev;
	int error;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);

	/* Reset the shared transition/error state for this suspend phase. */
	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" leaf devices upfront so they don't need to
	 * wait for the "sync" devices they don't depend on.
	 */
	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_leaf_device(dev))
			dpm_async_with_cleanup(dev, async_suspend_noirq);
	}

	/* Suspend runs over the list tail-first (children before parents). */
	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.prev);

		list_move(&dev->power.entry, &dpm_noirq_list);

		if (dpm_async_fn(dev, async_suspend_noirq))
			continue;

		get_device(dev);

		/* Device callbacks must not run under dpm_list_mtx. */
		mutex_unlock(&dpm_list_mtx);

		device_suspend_noirq(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (READ_ONCE(async_error)) {
			/* Unblock waiters of devices never processed. */
			dpm_async_suspend_complete_all(&dpm_late_early_list);
			/*
			 * Move all devices to the target list to resume them
			 * properly.
			 */
			list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
			break;
		}
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();

	error = READ_ONCE(async_error);
	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);

	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
1583
1584 /**
1585 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1586 * @state: PM transition of the system being carried out.
1587 *
1588 * Prevent device drivers' interrupt handlers from being called and invoke
1589 * "noirq" suspend callbacks for all non-sysdev devices.
1590 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	/* Arm wake IRQs and keep driver interrupt handlers from running. */
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	/* On failure, roll back by resuming everything suspended so far. */
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}
1604
/*
 * Propagate @dev's "wakeup path" status up to its parent, unless the parent
 * ignores its children for PM purposes.
 */
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	/* power.wakeup_path is protected by the parent's power.lock. */
	spin_lock_irq(&parent->power.lock);

	if (device_wakeup_path(dev) && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}
1619
1620 static void async_suspend_late(void *data, async_cookie_t cookie);
1621
1622 /**
1623 * device_suspend_late - Execute a "late suspend" callback for given device.
1624 * @dev: Device to handle.
1625 * @state: PM transition of the system being carried out.
1626 * @async: If true, the device is being suspended asynchronously.
1627 *
1628 * Runtime PM is disabled for @dev while this function is being executed.
1629 */
static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	/* Children and consumers must be suspended before this device. */
	dpm_wait_for_subordinate(dev, async);

	/* Bail out early if another device has already failed to suspend. */
	if (READ_ONCE(async_error))
		goto Complete;

	/* Abort the transition if a wakeup event is pending. */
	if (pm_wakeup_pending()) {
		WRITE_ONCE(async_error, -EBUSY);
		goto Complete;
	}

	if (dev->power.direct_complete)
		goto Complete;

	/*
	 * After this point, any runtime PM operations targeting the device
	 * will fail until the corresponding pm_runtime_enable() call in
	 * device_resume_early().
	 */
	pm_runtime_disable(dev);

	if (dev->power.syscore)
		goto Skip;

	/* Callback lookup order: PM domain, device type, class, bus. */
	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	/* "Smart suspend": skip the callbacks entirely if allowed. */
	if (dev_pm_skip_suspend(dev))
		goto Skip;

	/* Fall back to the driver's own callback. */
	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

 Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async late" : " late", error);
		/* Undo the pm_runtime_disable() above on failure. */
		pm_runtime_enable(dev);
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

 Skip:
	dev->power.is_late_suspended = true;

 Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);

	/* Do not schedule more work after any suspend error. */
	if (error || READ_ONCE(async_error))
		return;

	dpm_async_suspend_superior(dev, async_suspend_late);
}
1709
/*
 * Async work wrapper: run the "late suspend" phase for one device and drop
 * the reference that was taken on it when this work item was scheduled.
 */
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *target = data;

	device_suspend_late(target, pm_transition, true);
	put_device(target);
}
1717
1718 /**
1719 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1720 * @state: PM transition of the system being carried out.
1721 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	struct device *dev;
	int error;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);

	/* Reset the shared transition/error state for this suspend phase. */
	pm_transition = state;
	async_error = 0;

	wake_up_all_idle_cpus();

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" leaf devices upfront so they don't need to
	 * wait for the "sync" devices they don't depend on.
	 */
	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_leaf_device(dev))
			dpm_async_with_cleanup(dev, async_suspend_late);
	}

	/* Suspend runs over the list tail-first (children before parents). */
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.prev);

		list_move(&dev->power.entry, &dpm_late_early_list);

		if (dpm_async_fn(dev, async_suspend_late))
			continue;

		get_device(dev);

		/* Device callbacks must not run under dpm_list_mtx. */
		mutex_unlock(&dpm_list_mtx);

		device_suspend_late(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (READ_ONCE(async_error)) {
			/* Unblock waiters of devices never processed. */
			dpm_async_suspend_complete_all(&dpm_suspended_list);
			/*
			 * Move all devices to the target list to resume them
			 * properly.
			 */
			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
			break;
		}
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();

	error = READ_ONCE(async_error);
	if (error) {
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		/* Roll back by running the "early resume" phase. */
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
1789
1790 /**
1791 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1792 * @state: PM transition of the system being carried out.
1793 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	/* "Late" phase first; nothing to roll back if it fails. */
	error = dpm_suspend_late(state);
	if (error)
		goto out;

	/* Then the "noirq" phase; roll back the "late" phase on failure. */
	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

 out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
1812
1813 /**
1814 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1815 * @dev: Device to suspend.
1816 * @state: PM transition of the system being carried out.
1817 * @cb: Suspend callback to execute.
1818 * @info: string description of caller.
1819 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	/* Record the start time for initcall debug reporting. */
	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}
1838
/*
 * Clear the direct_complete flag of @dev's parent and suppliers: since @dev
 * went through full suspend, the devices it depends on cannot use the
 * direct-complete optimization.
 */
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	/* power.direct_complete is protected by the owner's power.lock. */
	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
1860
1861 static void async_suspend(void *data, async_cookie_t cookie);
1862
1863 /**
1864 * device_suspend - Execute "suspend" callbacks for given device.
1865 * @dev: Device to handle.
1866 * @state: PM transition of the system being carried out.
1867 * @async: If true, the device is being suspended asynchronously.
1868 */
static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	/* Children and consumers must be suspended before this device. */
	dpm_wait_for_subordinate(dev, async);

	/* Bail out early if another device has already failed to suspend. */
	if (READ_ONCE(async_error)) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	/* Abort the transition if a wakeup event is pending. */
	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		WRITE_ONCE(async_error, -EBUSY);
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			/*
			 * Re-check: a concurrent runtime resume may have run
			 * before runtime PM was disabled above.
			 */
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				dev->power.is_suspended = true;
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * Callback lookup order: PM domain, device type, class, bus, with the
	 * driver callback as a fallback.  A legacy bus ->suspend callback is
	 * invoked directly via legacy_suspend().
	 */
	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}

	/* Let any waiters (parent, suppliers) make progress. */
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);

	/* Do not schedule more work after any suspend error. */
	if (error || READ_ONCE(async_error))
		return;

	dpm_async_suspend_superior(dev, async_suspend);
}
1998
/*
 * Async work wrapper: run the "suspend" phase for one device and drop the
 * reference that was taken on it when this work item was scheduled.
 */
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *target = data;

	device_suspend(target, pm_transition, true);
	put_device(target);
}
2006
2007 /**
2008 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
2009 * @state: PM transition of the system being carried out.
2010 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	struct device *dev;
	int error;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	/* Quiesce devfreq/cpufreq before suspending the devices. */
	devfreq_suspend();
	cpufreq_suspend();

	/* Reset the shared transition/error state for this suspend phase. */
	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" leaf devices upfront so they don't need to
	 * wait for the "sync" devices they don't depend on.
	 */
	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_leaf_device(dev))
			dpm_async_with_cleanup(dev, async_suspend);
	}

	/* Suspend runs over the list tail-first (children before parents). */
	while (!list_empty(&dpm_prepared_list)) {
		dev = to_device(dpm_prepared_list.prev);

		list_move(&dev->power.entry, &dpm_suspended_list);

		if (dpm_async_fn(dev, async_suspend))
			continue;

		get_device(dev);

		/* Device callbacks must not run under dpm_list_mtx. */
		mutex_unlock(&dpm_list_mtx);

		device_suspend(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (READ_ONCE(async_error)) {
			/* Unblock waiters of devices never processed. */
			dpm_async_suspend_complete_all(&dpm_prepared_list);
			/*
			 * Move all devices to the target list to resume them
			 * properly.
			 */
			list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
			break;
		}
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();

	error = READ_ONCE(async_error);
	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND);

	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
2079
/*
 * Decide whether "smart suspend" can be enabled for @dev.  Returns false if
 * the device (or its parent or any runtime-PM supplier) disqualifies it.
 */
static bool device_prepare_smart_suspend(struct device *dev)
{
	struct device_link *link;
	bool ret = true;
	int idx;

	/*
	 * The "smart suspend" feature is enabled for devices whose drivers ask
	 * for it and for devices without PM callbacks.
	 *
	 * However, if "smart suspend" is not enabled for the device's parent
	 * or any of its suppliers that take runtime PM into account, it cannot
	 * be enabled for the device either.
	 */
	if (!dev->power.no_pm_callbacks &&
	    !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
		return false;

	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
		return false;

	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev) {
		/* Only suppliers linked with runtime PM semantics matter. */
		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
			continue;

		if (!dev_pm_smart_suspend(link->supplier) &&
		    !pm_runtime_blocked(link->supplier)) {
			ret = false;
			break;
		}
	}

	device_links_read_unlock(idx);

	return ret;
}
2119
2120 /**
2121 * device_prepare - Prepare a device for system power transition.
2122 * @dev: Device to handle.
2123 * @state: PM transition of the system being carried out.
2124 *
2125 * Execute the ->prepare() callback(s) for given device. No new children of the
2126 * device may be registered after this function has returned.
2127 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	/* Resolved ->prepare() callback, if any layer provides one. */
	int (*callback)(struct device *) = NULL;
	bool smart_suspend;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);
	/*
	 * If runtime PM is disabled for the device at this point and it has
	 * never been enabled so far, it should not be enabled until this system
	 * suspend-resume cycle is complete, so prepare to trigger a warning on
	 * subsequent attempts to enable it.
	 */
	smart_suspend = !pm_runtime_block_if_disabled(dev);

	/*
	 * Syscore devices are handled outside of the regular PM callback
	 * paths, so no ->prepare() callback runs for them.  NOTE(review): the
	 * pm_runtime_get_noresume() reference taken above is presumably
	 * dropped in the complete phase — confirm against device_complete().
	 */
	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	/* Reset the per-cycle wakeup bookkeeping before running callbacks. */
	dev->power.wakeup_path = false;
	dev->power.out_band_wakeup = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	/* Callback lookup order: PM domain, type, class, bus, then driver. */
	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(dev, callback, ret);
		/* Drop the reference taken above: this device won't reach the
		 * complete phase where it would normally be released. */
		pm_runtime_put(dev);
		return ret;
	}
	/* Do not enable "smart suspend" for devices with disabled runtime PM. */
	if (smart_suspend)
		smart_suspend = device_prepare_smart_suspend(dev);

	spin_lock_irq(&dev->power.lock);

	dev->power.smart_suspend = smart_suspend;
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	spin_unlock_irq(&dev->power.lock);

	return 0;
}
2205
2206 /**
2207 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2208 * @state: PM transition of the system being carried out.
2209 *
2210 * Execute the ->prepare() callback(s) for all devices.
2211 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disable probing of devices. This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices will happen during suspend or
	 * hibernation and system behavior will be unpredictable in this case.
	 * So, let's prohibit device's probing here and defer their probes
	 * instead. The normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();
	/* Suspend thermal control. */
	thermal_pm_prepare();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);

		/* Pin the device so it cannot go away while the lock is dropped. */
		get_device(dev);

		/*
		 * Drop dpm_list_mtx across the callback: device_prepare() takes
		 * the device lock, which must never be acquired while holding
		 * dpm_list_mtx (see the comment near the top of this file).
		 */
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);

		if (!error) {
			dev->power.is_prepared = true;
			/* Move the prepared device over to dpm_prepared_list. */
			if (!list_empty(&dev->power.entry))
				list_move_tail(&dev->power.entry, &dpm_prepared_list);
		} else if (error == -EAGAIN) {
			/* -EAGAIN is treated as non-fatal: clear it and go on. */
			error = 0;
		} else {
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
		}

		/* Drop the list lock again before the final put_device(). */
		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
2269
2270 /**
2271 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2272 * @state: PM transition of the system being carried out.
2273 *
2274 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2275 * callbacks for them.
2276 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t start = ktime_get();
	int ret;

	/* Run the prepare phase first; only suspend if it fully succeeded. */
	ret = dpm_prepare(state);
	if (ret) {
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else {
		/* Restrict allocation behavior for the duration of suspend. */
		pm_restrict_gfp_mask();
		ret = dpm_suspend(state);
	}

	dpm_show_time(start, state, ret, "start");
	return ret;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
2294
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
	/* Success is silent; only failures are worth logging. */
	if (!ret)
		return;

	dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
2301
2302 /**
2303 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2304 * @subordinate: Device that needs to wait for @dev.
2305 * @dev: Device to wait for.
2306 */
device_pm_wait_for_dev(struct device * subordinate,struct device * dev)2307 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2308 {
2309 dpm_wait(dev, subordinate->power.async_suspend);
2310 return async_error;
2311 }
2312 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2313
2314 /**
2315 * dpm_for_each_dev - device iterator.
2316 * @data: data for the callback.
2317 * @fn: function to be called for each device.
2318 *
2319 * Iterate over devices in dpm_list, and call @fn for each device,
2320 * passing it @data.
2321 */
dpm_for_each_dev(void * data,void (* fn)(struct device *,void *))2322 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2323 {
2324 struct device *dev;
2325
2326 if (!fn)
2327 return;
2328
2329 device_pm_lock();
2330 list_for_each_entry(dev, &dpm_list, power.entry)
2331 fn(dev, data);
2332 device_pm_unlock();
2333 }
2334 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2335
pm_ops_is_empty(const struct dev_pm_ops * ops)2336 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2337 {
2338 if (!ops)
2339 return true;
2340
2341 return !ops->prepare &&
2342 !ops->suspend &&
2343 !ops->suspend_late &&
2344 !ops->suspend_noirq &&
2345 !ops->resume_noirq &&
2346 !ops->resume_early &&
2347 !ops->resume &&
2348 !ops->complete;
2349 }
2350
void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	/*
	 * Cache whether any layer (bus, class, type, PM domain, driver)
	 * supplies a system sleep callback for this device, including the
	 * legacy bus/driver suspend/resume hooks.  The result is stored in
	 * power.no_pm_callbacks under power.lock, which the suspend/resume
	 * paths test to skip callback invocation entirely.
	 */
	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
2366
dev_pm_skip_suspend(struct device * dev)2367 bool dev_pm_skip_suspend(struct device *dev)
2368 {
2369 return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2370 }
2371