/*
 * cpuidle.h - a generic framework for CPU idle power management
 *
 * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Shaohua Li <shaohua.li@intel.com>
 *          Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#ifndef _LINUX_CPUIDLE_H
#define _LINUX_CPUIDLE_H

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/context_tracking.h>

#define CPUIDLE_STATE_MAX	10
#define CPUIDLE_NAME_LEN	16
#define CPUIDLE_DESC_LEN	32

struct module;

struct cpuidle_device;
struct cpuidle_driver;


/****************************
 * CPUIDLE DEVICE INTERFACE *
 ****************************/

#define CPUIDLE_STATE_DISABLED_BY_USER		BIT(0)
#define CPUIDLE_STATE_DISABLED_BY_DRIVER	BIT(1)

struct cpuidle_state_usage {
	unsigned long long disable;
	unsigned long long usage;
	u64 time_ns;
	unsigned long long above; /* Number of times it's been too deep */
	unsigned long long below; /* Number of times it's been too shallow */
	unsigned long long rejected; /* Number of times idle entry was rejected */
#ifdef CONFIG_SUSPEND
	unsigned long long s2idle_usage;
	unsigned long long s2idle_time; /* in us */
#endif
};

struct cpuidle_state {
	char		name[CPUIDLE_NAME_LEN];
	char		desc[CPUIDLE_DESC_LEN];

	s64		exit_latency_ns;
	s64		target_residency_ns;
	unsigned int	flags;
	unsigned int	exit_latency; /* in us */
	int		power_usage; /* in mW */
	unsigned int	target_residency; /* in us */

	int (*enter)	(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index);

	void (*enter_dead) (struct cpuidle_device *dev, int index);

	/*
	 * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
	 * suspended, so it must not re-enable interrupts at any point (even
	 * temporarily) or attempt to change states of clock event devices.
	 *
	 * This callback may point to the same function as ->enter if all of
	 * the above requirements are met by it.
	 */
	int (*enter_s2idle)(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv,
			    int index);
};

/* Idle State Flags */
#define CPUIDLE_FLAG_NONE		(0x00)
#define CPUIDLE_FLAG_POLLING		BIT(0) /* polling state */
#define CPUIDLE_FLAG_COUPLED		BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP		BIT(2) /* timer is stopped on this state */
#define CPUIDLE_FLAG_UNUSABLE		BIT(3) /* avoid using this state */
#define CPUIDLE_FLAG_OFF		BIT(4) /* disable this state by default */
#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(5) /* idle-state flushes TLBs */
#define CPUIDLE_FLAG_RCU_IDLE		BIT(6) /* idle-state takes care of RCU */

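/*
 * Illustrative sketch only (not part of this header): a driver describes
 * each state with latencies in microseconds (per the field comments in
 * struct cpuidle_state above) and a combination of the flags above. The
 * names, numbers and example_enter() callback below are invented, e.g.
 * for a deep state whose per-CPU timer stops:
 *
 *	{
 *		.name			= "deep",
 *		.desc			= "power-gated core",
 *		.exit_latency		= 300,
 *		.target_residency	= 1000,
 *		.flags			= CPUIDLE_FLAG_TIMER_STOP,
 *		.enter			= example_enter,
 *	}
 */
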
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;

struct cpuidle_device {
	unsigned int		registered:1;
	unsigned int		enabled:1;
	unsigned int		poll_time_limit:1;
	unsigned int		cpu;
	ktime_t			next_hrtimer;

	int			last_state_idx;
	u64			last_residency_ns;
	u64			poll_limit_ns;
	u64			forced_idle_latency_limit_ns;
	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
	struct cpuidle_driver_kobj *kobj_driver;
	struct cpuidle_device_kobj *kobj_dev;
	struct list_head 	device_list;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	cpumask_t		coupled_cpus;
	struct cpuidle_coupled	*coupled;
#endif
};

DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);

static __always_inline void ct_cpuidle_enter(void)
{
	lockdep_assert_irqs_disabled();
	/*
	 * Idle is allowed to (temporarily) enable IRQs. It
	 * will return with IRQs disabled.
	 *
	 * Trace IRQs enable here, then switch off RCU, and have
	 * arch_cpu_idle() use raw_local_irq_enable(). Note that
	 * ct_idle_enter() relies on lockdep IRQ state, so switch that
	 * last -- this is very similar to the entry code.
	 */
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();
	ct_idle_enter();
	lockdep_hardirqs_on(_RET_IP_);
}

static __always_inline void ct_cpuidle_exit(void)
{
	/*
	 * Carefully undo the above.
	 */
	lockdep_hardirqs_off(_RET_IP_);
	ct_idle_exit();
	instrumentation_begin();
}

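/*
 * Illustrative sketch (an assumption, not defined by this header): a
 * state flagged CPUIDLE_FLAG_RCU_IDLE is expected to perform the
 * context-tracking transition itself, bracketing only the low-level
 * entry with ct_cpuidle_enter()/ct_cpuidle_exit(), much as the
 * __CPU_PM_CPU_IDLE_ENTER() helper at the end of this file does. The
 * example_firmware_suspend() call is hypothetical.
 *
 *	static int example_rcu_idle_enter(struct cpuidle_device *dev,
 *					  struct cpuidle_driver *drv,
 *					  int index)
 *	{
 *		int ret;
 *
 *		ct_cpuidle_enter();
 *		ret = example_firmware_suspend(index);
 *		ct_cpuidle_exit();
 *
 *		return ret ? -1 : index;
 *	}
 */
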
/****************************
 * CPUIDLE DRIVER INTERFACE *
 ****************************/

struct cpuidle_driver {
	const char		*name;
	struct module 		*owner;

	/* used by the cpuidle framework to setup the broadcast timer */
	unsigned int		bctimer:1;
	/* states array must be ordered in decreasing power consumption */
	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
	int			state_count;
	int			safe_state_index;

	/* the driver handles the cpus in cpumask */
	struct cpumask		*cpumask;

	/* preferred governor to switch at register time */
	const char		*governor;
};

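/*
 * Illustrative sketch only: a minimal hypothetical driver and its
 * registration. All names are invented; cpu_do_idle() stands in for the
 * platform's low-level idle operation. cpuidle_register() with a NULL
 * coupled_cpus mask also registers a cpuidle device for each CPU the
 * driver covers.
 *
 *	static int example_enter(struct cpuidle_device *dev,
 *				 struct cpuidle_driver *drv, int index)
 *	{
 *		cpu_do_idle();
 *		return index;
 *	}
 *
 *	static struct cpuidle_driver example_driver = {
 *		.name		= "example_idle",
 *		.owner		= THIS_MODULE,
 *		.states		= {
 *			{
 *				.name			= "shallow",
 *				.desc			= "architected wait",
 *				.exit_latency		= 1,
 *				.target_residency	= 1,
 *				.enter			= example_enter,
 *			},
 *		},
 *		.state_count	= 1,
 *	};
 *
 *	static int __init example_idle_init(void)
 *	{
 *		return cpuidle_register(&example_driver, NULL);
 *	}
 *	device_initcall(example_idle_init);
 */
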
#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
extern bool cpuidle_not_available(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev);

extern int cpuidle_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev,
			  bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev);

extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
					  bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device_no_lock(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
			    const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern void cpuidle_pause(void);
extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);

extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
					 struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
				    struct cpuidle_device *dev)
{return 0; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
						 int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline void cpuidle_unregister_device_no_lock(struct cpuidle_device *dev) {}
static inline int cpuidle_register(struct cpuidle_driver *drv,
				   const struct cpumask *const coupled_cpus)
{return -ENODEV; }
static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
static inline void cpuidle_pause(void) { }
static inline void cpuidle_resume(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
	struct cpuidle_device *dev) {return NULL; }
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev,
				      u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev,
				u64 latency_limit_ns);
extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
					     struct cpuidle_device *dev,
					     u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev,
				       u64 latency_limit_ns)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif

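/*
 * Illustrative sketch (an assumption about intended use, modeled on the
 * scheduler's forced-idle path): bias state selection toward the deepest
 * state within a latency budget, then restore normal governor selection
 * by passing a zero limit.
 *
 *	cpuidle_use_deepest_state(500 * NSEC_PER_USEC);
 *	... let the CPU run through its idle loop ...
 *	cpuidle_use_deepest_state(0);
 */
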
/* kernel/sched/idle.c */
extern void sched_idle_set_state(struct cpuidle_state *idle_state);
extern void default_idle_call(void);

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
}
#endif

#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
#endif

/******************************
 * CPUIDLE GOVERNOR INTERFACE *
 ******************************/

struct cpuidle_governor {
	char			name[CPUIDLE_NAME_LEN];
	struct list_head 	governor_list;
	unsigned int		rating;

	int  (*enable)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);
	void (*disable)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);

	int  (*select)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev,
					bool *stop_tick);
	void (*reflect)		(struct cpuidle_device *dev, int index);
};

extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern s64 cpuidle_governor_latency_req(unsigned int cpu);

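/*
 * Illustrative sketch only: a trivial hypothetical governor that always
 * picks the shallowest state and keeps the tick running. A real governor
 * would also honor per-state disable flags and the latency constraint
 * reported by cpuidle_governor_latency_req().
 *
 *	static int example_select(struct cpuidle_driver *drv,
 *				  struct cpuidle_device *dev,
 *				  bool *stop_tick)
 *	{
 *		*stop_tick = false;
 *		return 0;
 *	}
 *
 *	static struct cpuidle_governor example_governor = {
 *		.name	= "example",
 *		.rating	= 10,
 *		.select	= example_select,
 *	};
 *
 *	static int __init example_governor_init(void)
 *	{
 *		return cpuidle_register_governor(&example_governor);
 *	}
 *	postcore_initcall(example_governor_init);
 */
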
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,			\
				idx,					\
				state,					\
				is_retention, is_rcu)			\
({									\
	int __ret = 0;							\
									\
	if (!idx) {							\
		cpu_do_idle();						\
		return idx;						\
	}								\
									\
	if (!is_retention)						\
		__ret = cpu_pm_enter();					\
	if (!__ret) {							\
		if (!is_rcu)						\
			ct_cpuidle_enter();				\
		__ret = low_level_idle_enter(state);			\
		if (!is_rcu)						\
			ct_cpuidle_exit();				\
		if (!is_retention)					\
			cpu_pm_exit();					\
	}								\
									\
	__ret ? -1 : idx;						\
})

#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)

#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)

#define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)

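/*
 * Illustrative sketch only: a platform enter callback built on the
 * helpers above. example_firmware_suspend() and example_state_param[]
 * are hypothetical; a real driver passes its firmware suspend entry
 * point and a platform-specific state parameter.
 *
 *	static int example_enter(struct cpuidle_device *dev,
 *				 struct cpuidle_driver *drv, int index)
 *	{
 *		u32 state = example_state_param[index];
 *
 *		return CPU_PM_CPU_IDLE_ENTER_PARAM(example_firmware_suspend,
 *						   index, state);
 *	}
 */
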
#endif /* _LINUX_CPUIDLE_H */