// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
#include <linux/nmi.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
/**
 * pm_hibernate_is_recovering - Check if recovering from a hibernation error.
 *
 * Lets a dev_pm_ops.thaw() callback tell whether it is running as part of a
 * normal hibernation transition or as part of recovery from an error.
 *
 * Return: true in the error-recovery case, false in the normal case.
 */
bool pm_hibernate_is_recovering(void)
{
	return pm_transition.event == PM_EVENT_RECOVER;
}
EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
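
/*
 * Illustrative (hypothetical driver) use of the above in a ->thaw()
 * callback:
 *
 *	static int foo_thaw(struct device *dev)
 *	{
 *		if (pm_hibernate_is_recovering())
 *			return foo_recover(dev);	// undo partial suspend
 *
 *		return foo_reinit(dev);		// normal post-freeze path
 *	}
 */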

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	case PM_EVENT_POWEROFF:
		return "poweroff";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
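/*
 * When pm_print_times_enabled is set (the /sys/power/pm_print_times knob),
 * the two helpers below log every PM callback invocation together with the
 * time it took; otherwise they are cheap no-ops.
 */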
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(rettime, calltime));
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
		    !device_link_flag_is_sync_state_only(link->flags))
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (e.g. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	dev_for_each_link_to_consumer(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
		    !device_link_flag_is_sync_state_only(link->flags))
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_POWEROFF:
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
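
/*
 * For illustration, with a hypothetical driver declaring
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume.
 */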

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_POWEROFF:
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_POWEROFF:
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
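/**
 * struct dpm_watchdog - Device PM watchdog.
 * @dev: struct device which we're guarding.
 * @tsk: task associated with the watchdog.
 * @timer: timer used by the watchdog.
 * @fatal: if set, the next watchdog timeout panics instead of only warning.
 */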
struct dpm_watchdog {
	struct device *dev;
	struct task_struct *tsk;
	struct timer_list timer;
	bool fatal;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
		 "Backtrace all CPUs on DPM watchdog timeout");

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
	struct timer_list *timer = &wd->timer;
	unsigned int time_left;

	if (wd->fatal) {
		unsigned int this_cpu = smp_processor_id();

		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
		show_stack(wd->tsk, NULL, KERN_EMERG);
		if (dpm_watchdog_all_cpu_backtrace)
			trigger_allbutcpu_cpu_backtrace(this_cpu);
		panic("%s %s: unrecoverable failure\n",
		      dev_driver_string(wd->dev), dev_name(wd->dev));
	}

	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
	show_stack(wd->tsk, NULL, KERN_WARNING);

	wd->fatal = true;
	mod_timer(timer, jiffies + HZ * time_left);
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;
	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	timer_delete_sync(timer);
	timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}

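/*
 * A device may be processed asynchronously only if it has opted in (typically
 * via device_enable_async_suspend()), the global pm_async knob is enabled,
 * and PM tracing is off, since pm_trace needs strictly ordered, synchronous
 * processing to pinpoint a failing device.
 */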
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

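/*
 * __dpm_async - Start async PM processing of @dev with @func, unless it has
 * been started already. Returns true if async processing is under way or was
 * scheduled successfully; false if @dev must be handled synchronously. A
 * device reference is taken for the scheduled work and dropped on scheduling
 * failure. Must be called under async_wip_mtx.
 */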
static bool __dpm_async(struct device *dev, async_func_t func)
{
	if (dev->power.work_in_progress)
		return true;

	if (!is_async(dev))
		return false;

	dev->power.work_in_progress = true;

	get_device(dev);

	if (async_schedule_dev_nocall(func, dev))
		return true;

	put_device(dev);

	return false;
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	guard(mutex)(&async_wip_mtx);

	return __dpm_async(dev, func);
}

static int dpm_async_with_cleanup(struct device *dev, void *fn)
{
	guard(mutex)(&async_wip_mtx);

	if (!__dpm_async(dev, fn))
		dev->power.work_in_progress = false;

	return 0;
}

static void dpm_async_resume_children(struct device *dev, async_func_t func)
{
	/*
	 * Prevent racing with dpm_clear_async_state() during initial list
	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
	 * dpm_resume().
	 */
	guard(mutex)(&dpm_list_mtx);

	/*
	 * Start processing "async" children of the device unless it's been
	 * started already for them.
	 */
	device_for_each_child(dev, func, dpm_async_with_cleanup);
}

static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
{
	struct device_link *link;
	int idx;

	dpm_async_resume_children(dev, func);

	idx = device_links_read_lock();

	/* Start processing the device's "async" consumers. */
	dev_for_each_link_to_consumer(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_async_with_cleanup(link->consumer, func);

	device_links_read_unlock(idx);
}

static void dpm_clear_async_state(struct device *dev)
{
	reinit_completion(&dev->power.completion);
	dev->power.work_in_progress = false;
}

static bool dpm_root_device(struct device *dev)
{
	lockdep_assert_held(&dpm_list_mtx);

	/*
	 * Since this function is required to run under dpm_list_mtx, the
	 * list_empty() below will only return true if the device's list of
	 * suppliers is actually empty before calling it.
	 */
	return !dev->parent && list_empty(&dev->links.suppliers);
}

static void async_resume_noirq(void *data, async_cookie_t cookie);

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended) {
		/*
		 * This means that system suspend has been aborted in the noirq
		 * phase before invoking the noirq suspend callback for the
		 * device, so if device_suspend_late() has left it in suspend,
		 * device_resume_early() should leave it in suspend too, in
		 * case its early resume depends on the noirq resume that has
		 * not run.
		 */
		if (dev_pm_skip_suspend(dev))
			dev->power.must_resume = false;

		goto Out;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback
	 * for this device later, it needs to appear as "suspended" to
	 * PM-runtime, so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active" unless its power.smart_suspend flag is clear, in
	 * which case it is not necessary to update its PM-runtime status.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_smart_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);

	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
	}

	dpm_async_resume_subordinate(dev, async_resume_noirq);
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" root devices upfront so they don't wait for
	 * the "sync" devices they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume_noirq);
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		if (!dpm_async_fn(dev, async_resume_noirq)) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_noirq(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	if (READ_ONCE(async_error))
		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
}

static void async_resume_early(void *data, async_cookie_t cookie);

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (dev->power.syscore)
		goto Skip;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;
	pm_runtime_enable(dev);

Out:
	TRACE_RESUME(error);

	complete_all(&dev->power.completion);

	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async early" : " early", error);
	}

	dpm_async_resume_subordinate(dev, async_resume_early);
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_early(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" root devices upfront so they don't wait for
	 * the "sync" devices they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume_early);
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		if (!dpm_async_fn(dev, async_resume_early)) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_early(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	if (READ_ONCE(async_error))
		dpm_save_failed_step(SUSPEND_RESUME_EARLY);

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

static void async_resume(void *data, async_cookie_t cookie);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (!dev->power.is_suspended)
		goto Complete;

	dev->power.is_suspended = false;

	if (dev->power.direct_complete) {
		/*
		 * Allow new children to be added under the device after this
		 * point if it has no PM callbacks.
		 */
		if (dev->power.no_pm_callbacks)
			dev->power.is_prepared = false;

		/* Match the pm_runtime_disable() in device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}

	dpm_async_resume_subordinate(dev, async_resume);
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" root devices upfront so they don't wait for
	 * the "sync" devices they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_root_device(dev))
			dpm_async_with_cleanup(dev, async_resume);
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		list_move_tail(&dev->power.entry, &dpm_prepared_list);

		if (!dpm_async_fn(dev, async_resume)) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);
	if (READ_ONCE(async_error))
		dpm_save_failed_step(SUSPEND_RESUME);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	/* If enabling runtime PM for the device is blocked, unblock it. */
	pm_runtime_unblock(dev);
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	pm_restore_gfp_mask();
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

static bool dpm_leaf_device(struct device *dev)
{
	struct device *child;

	lockdep_assert_held(&dpm_list_mtx);

	child = device_find_any_child(dev);
	if (child) {
		put_device(child);

		return false;
	}

	/*
	 * Since this function is required to run under dpm_list_mtx, the
	 * list_empty() below will only return true if the device's list of
	 * consumers is actually empty before calling it.
	 */
	return list_empty(&dev->links.consumers);
}

static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
{
	guard(mutex)(&dpm_list_mtx);

	/*
	 * If the device is suspended asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by checking
	 * if the device has been deleted already as the parent cannot be
	 * deleted before it.
	 */
	if (!device_pm_initialized(dev))
		return false;

	/* Start processing the device's parent if it is "async". */
	if (dev->parent)
		dpm_async_with_cleanup(dev->parent, func);

	return true;
}

static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
{
	struct device_link *link;
	int idx;

	if (!dpm_async_suspend_parent(dev, func))
		return;

	idx = device_links_read_lock();

	/* Start processing the device's "async" suppliers. */
	dev_for_each_link_to_supplier(link, dev)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_async_with_cleanup(link->supplier, func);

	device_links_read_unlock(idx);
}

static void dpm_async_suspend_complete_all(struct list_head *device_list)
{
	struct device *dev;

	guard(mutex)(&async_wip_mtx);

	list_for_each_entry_reverse(dev, device_list, power.entry) {
		/*
		 * In case the device is being waited for and async processing
		 * has not started for it yet, let the waiters make progress.
		 */
		if (!dev->power.work_in_progress)
			complete_all(&dev->power.completion);
	}
}

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

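/*
 * Mark the parent and all suppliers of @dev as having to be resumed, so that
 * none of them is left in suspend while @dev itself needs to be resumed.
 */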
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

static void async_suspend_noirq(void *data, async_cookie_t cookie);

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (READ_ONCE(async_error))
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Devices must be resumed unless they are explicitly allowed to be left
	 * in suspend, but even in that case skipping the resume of devices that
	 * were in use right before the system suspend (as indicated by their
	 * runtime PM usage counters and child counters) would be suboptimal.
	 */
	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);

	if (error || READ_ONCE(async_error))
		return;

	dpm_async_suspend_superior(dev, async_suspend_noirq);
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend_noirq(dev, pm_transition, true);
	put_device(dev);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	struct device *dev;
	int error;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" leaf devices upfront so they don't need to
	 * wait for the "sync" devices they don't depend on.
	 */
	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_leaf_device(dev))
			dpm_async_with_cleanup(dev, async_suspend_noirq);
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.prev);

		list_move(&dev->power.entry, &dpm_noirq_list);

		if (dpm_async_fn(dev, async_suspend_noirq))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		device_suspend_noirq(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (READ_ONCE(async_error)) {
			dpm_async_suspend_complete_all(&dpm_late_early_list);
			/*
			 * Move all devices to the target list to resume them
			 * properly.
			 */
			list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
			break;
		}
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();

	error = READ_ONCE(async_error);
	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);

	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

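/*
 * If @dev is in the wakeup path, propagate that fact to its parent, so that
 * the parent's suspend callbacks keep the path to the wakeup source powered,
 * unless the parent ignores its children for PM purposes.
 */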
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (device_wakeup_path(dev) && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

static void async_suspend_late(void *data, async_cookie_t cookie);

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (READ_ONCE(async_error))
		goto Complete;

	if (pm_wakeup_pending()) {
		WRITE_ONCE(async_error, -EBUSY);
		goto Complete;
	}

	if (dev->power.direct_complete)
		goto Complete;

	/*
	 * After this point, any runtime PM operations targeting the device
	 * will fail until the corresponding pm_runtime_enable() call in
	 * device_resume_early().
	 */
	pm_runtime_disable(dev);

	if (dev->power.syscore)
		goto Skip;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		WRITE_ONCE(async_error, error);
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async late" : " late", error);
		pm_runtime_enable(dev);
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);

	if (error || READ_ONCE(async_error))
		return;

	dpm_async_suspend_superior(dev, async_suspend_late);
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend_late(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	struct device *dev;
	int error;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);

	pm_transition = state;
	async_error = 0;

	wake_up_all_idle_cpus();

	mutex_lock(&dpm_list_mtx);

	/*
	 * Start processing "async" leaf devices upfront so they don't need to
	 * wait for the "sync" devices they don't depend on.
	 */
	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
		dpm_clear_async_state(dev);
		if (dpm_leaf_device(dev))
			dpm_async_with_cleanup(dev, async_suspend_late);
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.prev);

		list_move(&dev->power.entry, &dpm_late_early_list);

		if (dpm_async_fn(dev, async_suspend_late))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		device_suspend_late(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (READ_ONCE(async_error)) {
			dpm_async_suspend_complete_all(&dpm_suspended_list);
			/*
			 * Move all devices to the target list to resume them
			 * properly.
			 */
			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
			break;
		}
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();

	error = READ_ONCE(async_error);
	if (error) {
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

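/*
 * Clear the direct_complete flag of the parent and all suppliers of @dev:
 * since @dev itself is going through full suspend callbacks, the devices it
 * depends on cannot safely take the direct-complete shortcut and stay
 * runtime-suspended across the transition.
 */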
dpm_clear_superiors_direct_complete(struct device * dev)1836 static void dpm_clear_superiors_direct_complete(struct device *dev)
1837 {
1838 struct device_link *link;
1839 int idx;
1840
1841 if (dev->parent) {
1842 spin_lock_irq(&dev->parent->power.lock);
1843 dev->parent->power.direct_complete = false;
1844 spin_unlock_irq(&dev->parent->power.lock);
1845 }
1846
1847 idx = device_links_read_lock();
1848
1849 dev_for_each_link_to_supplier(link, dev) {
1850 spin_lock_irq(&link->supplier->power.lock);
1851 link->supplier->power.direct_complete = false;
1852 spin_unlock_irq(&link->supplier->power.lock);
1853 }
1854
1855 device_links_read_unlock(idx);
1856 }
1857
static void async_suspend(void *data, async_cookie_t cookie);

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        const char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (READ_ONCE(async_error)) {
                dev->power.direct_complete = false;
                goto Complete;
        }

        /*
         * Wait for possible runtime PM transitions of the device in progress
         * to complete and if there's a runtime resume request pending for it,
         * resume it before proceeding with invoking the system-wide suspend
         * callbacks for it.
         *
         * If the system-wide suspend callbacks below change the configuration
         * of the device, they must disable runtime PM for it or otherwise
         * ensure that its runtime-resume callbacks will not be confused by that
         * change in case they are invoked going forward.
         */
        pm_runtime_barrier(dev);

        if (pm_wakeup_pending()) {
                dev->power.direct_complete = false;
                WRITE_ONCE(async_error, -EBUSY);
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        /* Avoid direct_complete to let wakeup_path propagate. */
        if (device_may_wakeup(dev) || device_wakeup_path(dev))
                dev->power.direct_complete = false;

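        /*
         * For direct_complete to apply, the device must still be runtime-
         * suspended after runtime PM has been disabled for it, so check the
         * status once before disabling runtime PM (cheap early exit) and
         * once again afterwards, when it can no longer change.
         */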
        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
                        if (pm_runtime_status_suspended(dev)) {
                                pm_dev_dbg(dev, state, "direct-complete ");
                                dev->power.is_suspended = true;
                                goto Complete;
                        }

                        pm_runtime_enable(dev);
                }
                dev->power.direct_complete = false;
        }

        dev->power.may_skip_resume = true;
        dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class && dev->class->pm) {
                info = "class ";
                callback = pm_op(dev->class->pm, state);
                goto Run;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                               "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (device_may_wakeup(dev))
                        dev->power.wakeup_path = true;

                dpm_propagate_wakeup_to_parent(dev);
                dpm_clear_superiors_direct_complete(dev);
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        if (error) {
                WRITE_ONCE(async_error, error);
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, state, async ? " async" : "", error);
        }

        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);

        if (error || READ_ONCE(async_error))
                return;

        dpm_async_suspend_superior(dev, async_suspend);
}

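/*
 * async_suspend - Entry point for asynchronous device suspend.
 *
 * Runs device_suspend() and drops the reference on the device taken when the
 * async work was scheduled.
 */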
static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = data;

        device_suspend(dev, pm_transition, true);
        put_device(dev);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        struct device *dev;
        int error;

        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
        might_sleep();

        devfreq_suspend();
        cpufreq_suspend();

        pm_transition = state;
        async_error = 0;

        mutex_lock(&dpm_list_mtx);

        /*
         * Start processing "async" leaf devices upfront so they don't need to
         * wait for the "sync" devices they don't depend on.
         */
        list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
                dpm_clear_async_state(dev);
                if (dpm_leaf_device(dev))
                        dpm_async_with_cleanup(dev, async_suspend);
        }

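        /*
         * Suspend the remaining devices in reverse order (children before
         * parents): hand each one off to an async thread if possible and
         * suspend it synchronously otherwise.
         */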
        while (!list_empty(&dpm_prepared_list)) {
                dev = to_device(dpm_prepared_list.prev);

                list_move(&dev->power.entry, &dpm_suspended_list);

                if (dpm_async_fn(dev, async_suspend))
                        continue;

                get_device(dev);

                mutex_unlock(&dpm_list_mtx);

                device_suspend(dev, state, false);

                put_device(dev);

                mutex_lock(&dpm_list_mtx);

                if (READ_ONCE(async_error)) {
                        dpm_async_suspend_complete_all(&dpm_prepared_list);
                        /*
                         * Move all devices to the target list to resume them
                         * properly.
                         */
                        list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
                        break;
                }
        }

        mutex_unlock(&dpm_list_mtx);

        async_synchronize_full();

        error = READ_ONCE(async_error);
        if (error)
                dpm_save_failed_step(SUSPEND_SUSPEND);

        dpm_show_time(starttime, state, error, NULL);
        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
        return error;
}

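/**
 * device_prepare_smart_suspend - Decide whether "smart suspend" can be used.
 * @dev: Device to check.
 *
 * Return: true if "smart suspend" may be enabled for @dev, false otherwise
 * (the conditions are spelled out in the comment in the function body).
 */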
static bool device_prepare_smart_suspend(struct device *dev)
{
        struct device_link *link;
        bool ret = true;
        int idx;

        /*
         * The "smart suspend" feature is enabled for devices whose drivers ask
         * for it and for devices without PM callbacks.
         *
         * However, if "smart suspend" is not enabled for the device's parent
         * or any of its suppliers that take runtime PM into account, it cannot
         * be enabled for the device either.
         */
        if (!dev->power.no_pm_callbacks &&
            !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
                return false;

        if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
            !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
                return false;

        idx = device_links_read_lock();

        dev_for_each_link_to_supplier(link, dev) {
                if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
                        continue;

                if (!dev_pm_smart_suspend(link->supplier) &&
                    !pm_runtime_blocked(link->supplier)) {
                        ret = false;
                        break;
                }
        }

        device_links_read_unlock(idx);

        return ret;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        bool smart_suspend;
        int ret = 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device. To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);
        /*
         * If runtime PM is disabled for the device at this point and it has
         * never been enabled so far, it should not be enabled until this system
         * suspend-resume cycle is complete, so prepare to trigger a warning on
         * subsequent attempts to enable it.
         */
        smart_suspend = !pm_runtime_block_if_disabled(dev);

        if (dev->power.syscore)
                return 0;

        device_lock(dev);

        dev->power.wakeup_path = false;
        dev->power.out_band_wakeup = false;

        if (dev->power.no_pm_callbacks)
                goto unlock;

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.prepare;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->prepare;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->prepare;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->prepare;

        if (!callback && dev->driver && dev->driver->pm)
                callback = dev->driver->pm->prepare;

        if (callback)
                ret = callback(dev);

unlock:
        device_unlock(dev);

        if (ret < 0) {
                suspend_report_result(dev, callback, ret);
                pm_runtime_put(dev);
                return ret;
        }
        /* Do not enable "smart suspend" for devices with disabled runtime PM. */
        if (smart_suspend)
                smart_suspend = device_prepare_smart_suspend(dev);

        spin_lock_irq(&dev->power.lock);

        dev->power.smart_suspend = smart_suspend;
        /*
         * A positive return value from ->prepare() means "this device appears
         * to be runtime-suspended and its state is fine, so if it really is
         * runtime-suspended, you can leave it in that state provided that you
         * will do the same thing with all of its descendants". This only
         * applies to suspend transitions, however.
         */
        dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
                (ret > 0 || dev->power.no_pm_callbacks) &&
                !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

        spin_unlock_irq(&dev->power.lock);

        return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);

        /*
         * Give the known devices a chance to complete their probes before
         * device probing is disabled below.  This synchronization point is
         * important at least at boot time and during hibernation restore.
         */
        wait_for_device_probe();
        /*
         * Probing devices while a suspend or hibernation transition is in
         * progress is unsafe and would make the system behavior unpredictable,
         * so prohibit device probing here and defer the probes instead.  The
         * normal behavior will be restored in dpm_complete().
         */
        device_block_probing();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list) && !error) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);

                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                error = device_prepare(dev, state);
                trace_device_pm_callback_end(dev, error);

                mutex_lock(&dpm_list_mtx);

                if (!error) {
                        dev->power.is_prepared = true;
                        if (!list_empty(&dev->power.entry))
                                list_move_tail(&dev->power.entry, &dpm_prepared_list);
                } else if (error == -EAGAIN) {
                        error = 0;
                } else {
                        dev_info(dev, "not prepared for power transition: code %d\n",
                                 error);
                }

                mutex_unlock(&dpm_list_mtx);

                put_device(dev);

                mutex_lock(&dpm_list_mtx);
        }
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error;

        error = dpm_prepare(state);
        if (error)
                dpm_save_failed_step(SUSPEND_PREPARE);
        else {
                pm_restrict_gfp_mask();
                error = dpm_suspend(state);
        }

        dpm_show_time(starttime, state, error, "start");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

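/*
 * __suspend_report_result - Log a nonzero result from a suspend callback.
 *
 * Normally invoked through the suspend_report_result() macro, which supplies
 * the name of the calling function as @function.
 */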
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
        if (ret)
                dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

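/*
 * pm_ops_is_empty - Check if a dev_pm_ops structure provides no system-wide
 * PM callbacks.  Runtime PM callbacks are not taken into account here.
 */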
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
        if (!ops)
                return true;

        return !ops->prepare &&
               !ops->suspend &&
               !ops->suspend_late &&
               !ops->suspend_noirq &&
               !ops->resume_noirq &&
               !ops->resume_early &&
               !ops->resume &&
               !ops->complete;
}

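/*
 * device_pm_check_callbacks - Update the no_pm_callbacks flag for @dev.
 *
 * The flag is set if neither the device's bus type, class, device type, PM
 * domain, nor its driver (including the legacy suspend/resume callbacks)
 * provides any PM callbacks, which allows the PM core to take shortcuts when
 * handling the device.
 */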
void device_pm_check_callbacks(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);
        dev->power.no_pm_callbacks =
                (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
                 !dev->bus->suspend && !dev->bus->resume)) &&
                (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
                (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
                 !dev->driver->suspend && !dev->driver->resume));
        spin_unlock_irqrestore(&dev->power.lock, flags);
}

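/**
 * dev_pm_skip_suspend - Check whether the system suspend of a device can be
 * skipped.
 * @dev: Device to check.
 *
 * Return: true if "smart suspend" is enabled for @dev and its runtime PM
 * status is "suspended", in which case the device's system suspend callbacks
 * may avoid touching the hardware.
 *
 * For instance, a driver's late suspend callback might use it as in the
 * following hypothetical sketch (foo_suspend_late() and foo_power_down() are
 * illustrative only, not part of this file):
 *
 *	static int foo_suspend_late(struct device *dev)
 *	{
 *		if (dev_pm_skip_suspend(dev))
 *			return 0;
 *
 *		return foo_power_down(dev);
 *	}
 */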
bool dev_pm_skip_suspend(struct device *dev)
{
        return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}