/*
 * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>

#ifdef CONFIG_PM_RUNTIME
/**
 * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
 * @dev: Device to handle.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_idle(), execute it and return its error code, if nonzero.
 * Otherwise, execute pm_runtime_suspend() for the device and return 0.
 */
int pm_generic_runtime_idle(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm && pm->runtime_idle) {
		int ret = pm->runtime_idle(dev);
		if (ret)
			return ret;
	}

	pm_runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
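
/*
 * Illustration (hypothetical driver code, not part of this file): when a
 * subsystem uses pm_generic_runtime_idle(), a driver can veto the automatic
 * suspend by returning a nonzero value from its own ->runtime_idle()
 * callback, for example:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *		return priv->busy ? -EBUSY : 0;
 *	}
 *
 * Returning 0 (or not providing the callback at all) lets the
 * pm_runtime_suspend() call above go ahead.
 */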

/**
 * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
 * @dev: Device to suspend.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_suspend(), execute it and return its error code. Otherwise,
 * return 0.
 */
int pm_generic_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

/**
 * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
 * @dev: Device to resume.
 *
 * If PM operations are defined for the @dev's driver and they include
 * ->runtime_resume(), execute it and return its error code. Otherwise,
 * return 0.
 */
int pm_generic_runtime_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret;

	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
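
/*
 * Example (illustrative sketch only; the "foo" names are hypothetical): a
 * bus type that has no runtime PM needs of its own can plug these generic
 * callbacks straight into its dev_pm_ops so that driver-provided callbacks,
 * if any, are the ones actually executed:
 *
 *	static const struct dev_pm_ops foo_bus_pm_ops = {
 *		.runtime_suspend = pm_generic_runtime_suspend,
 *		.runtime_resume = pm_generic_runtime_resume,
 *		.runtime_idle = pm_generic_runtime_idle,
 *	};
 *
 *	struct bus_type foo_bus_type = {
 *		.name = "foo",
 *		.pm = &foo_bus_pm_ops,
 *	};
 */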
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP
/**
 * pm_generic_prepare - Generic routine preparing a device for power transition.
 * @dev: Device to prepare.
 *
 * Prepare a device for a system-wide power transition.
 */
int pm_generic_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->prepare)
		ret = drv->pm->prepare(dev);

	return ret;
}

/**
 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
 * @dev: Device to handle.
 * @event: PM transition of the system under way.
 * @noirq: Whether or not this is the "noirq" stage.
 *
 * Execute the PM callback corresponding to @event provided by the driver of
 * @dev, if defined, and return its error code. Return 0 if the callback is
 * not present.
 */
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int (*callback)(struct device *);

	if (!pm)
		return 0;

	switch (event) {
	case PM_EVENT_SUSPEND:
		callback = noirq ? pm->suspend_noirq : pm->suspend;
		break;
	case PM_EVENT_FREEZE:
		callback = noirq ? pm->freeze_noirq : pm->freeze;
		break;
	case PM_EVENT_HIBERNATE:
		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
		break;
	case PM_EVENT_RESUME:
		callback = noirq ? pm->resume_noirq : pm->resume;
		break;
	case PM_EVENT_THAW:
		callback = noirq ? pm->thaw_noirq : pm->thaw;
		break;
	case PM_EVENT_RESTORE:
		callback = noirq ? pm->restore_noirq : pm->restore;
		break;
	default:
		callback = NULL;
		break;
	}

	return callback ? callback(dev) : 0;
}
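
/*
 * For instance, pm_generic_suspend(dev) below resolves to
 * dev->driver->pm->suspend(dev) when the driver defines that callback and
 * simply returns 0 otherwise; the _noirq wrappers pick the corresponding
 * _noirq callbacks instead.
 */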

/**
 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 */
int pm_generic_suspend(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);

/**
 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 */
int pm_generic_freeze(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);

/**
 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 */
int pm_generic_poweroff(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);

/**
 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_THAW, true);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 */
int pm_generic_thaw(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_THAW, false);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);

/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_RESUME, true);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);

/**
 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore_noirq(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 */
int pm_generic_restore(struct device *dev)
{
	return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
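
/*
 * Example (illustrative sketch only; the "foo" name is hypothetical): a
 * subsystem with no system sleep handling of its own can forward the main
 * phases to the driver by reusing the generic callbacks above (the remaining
 * _noirq variants can be wired up the same way):
 *
 *	static const struct dev_pm_ops foo_subsys_pm_ops = {
 *		.suspend = pm_generic_suspend,
 *		.resume = pm_generic_resume,
 *		.freeze = pm_generic_freeze,
 *		.thaw = pm_generic_thaw,
 *		.poweroff = pm_generic_poweroff,
 *		.restore = pm_generic_restore,
 *		.suspend_noirq = pm_generic_suspend_noirq,
 *		.resume_noirq = pm_generic_resume_noirq,
 *	};
 */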

/**
 * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
 */
void pm_generic_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);

	/*
	 * Let runtime PM try to suspend devices that haven't been in use
	 * before going into the system sleep state we are resuming from.
	 */
	pm_runtime_idle(dev);
}
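
/*
 * Illustration (hypothetical driver code, not part of this file): a driver
 * whose subsystem uses pm_generic_prepare() and pm_generic_complete() can
 * pair ->prepare() and ->complete() to quiesce new activity before the
 * transition and resume it afterwards:
 *
 *	static const struct dev_pm_ops foo_driver_pm_ops = {
 *		.prepare = foo_prepare,
 *		.complete = foo_complete,
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *	};
 */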
#endif /* CONFIG_PM_SLEEP */