1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_JUMP_LABEL_H
3 #define _LINUX_JUMP_LABEL_H
4
5 /*
6 * Jump label support
7 *
8 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
9 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
10 *
11 * DEPRECATED API:
12 *
13 * The use of 'struct static_key' directly, is now DEPRECATED. In addition
14 * static_key_{true,false}() is also DEPRECATED. IE DO NOT use the following:
15 *
16 * struct static_key false = STATIC_KEY_INIT_FALSE;
17 * struct static_key true = STATIC_KEY_INIT_TRUE;
18 * static_key_true()
19 * static_key_false()
20 *
21 * The updated API replacements are:
22 *
23 * DEFINE_STATIC_KEY_TRUE(key);
24 * DEFINE_STATIC_KEY_FALSE(key);
25 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
26 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
27 * static_branch_likely()
28 * static_branch_unlikely()
29 *
30 * Jump labels provide an interface to generate dynamic branches using
31 * self-modifying code. Assuming toolchain and architecture support, if we
32 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
33 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
34 * (which defaults to false - and the true block is placed out of line).
35 * Similarly, we can define an initially true key via
36 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
37 * "if (static_branch_unlikely(&key))", in which case we will generate an
38 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
40 * and static_branch_likely() statements.
41 *
42 * At runtime we can change the branch target by setting the key
43 * to true via a call to static_branch_enable(), or false using
44 * static_branch_disable(). If the direction of the branch is switched by
45 * these calls then we run-time modify the branch target via a
46 * no-op -> jump or jump -> no-op conversion. For example, for an
47 * initially false key that is used in an "if (static_branch_unlikely(&key))"
48 * statement, setting the key to true requires us to patch in a jump
49 * to the out-of-line of true branch.
50 *
51 * In addition to static_branch_{enable,disable}, we can also reference count
52 * the key or branch direction via static_branch_{inc,dec}. Thus,
53 * static_branch_inc() can be thought of as a 'make more true' and
54 * static_branch_dec() as a 'make more false'.
55 *
56 * Since this relies on modifying code, the branch modifying functions
57 * must be considered absolute slow paths (machine wide synchronization etc.).
58 * OTOH, since the affected branches are unconditional, their runtime overhead
59 * will be absolutely minimal, esp. in the default (off) case where the total
60 * effect is a single NOP of appropriate size. The on case will patch in a jump
61 * to the out-of-line block.
62 *
63 * When the control is directly exposed to userspace, it is prudent to delay the
64 * decrement to avoid high frequency code modifications which can (and do)
65 * cause significant performance degradation. Struct static_key_deferred and
66 * static_key_slow_dec_deferred() provide for this.
67 *
68 * Lacking toolchain and or architecture support, static keys fall back to a
69 * simple conditional branch.
70 *
71 * Additional babbling in: Documentation/staging/static-keys.rst
72 */
73
74 #ifndef __ASSEMBLY__
75
76 #include <linux/types.h>
77 #include <linux/compiler.h>
78 #include <linux/cleanup.h>
79
80 extern bool static_key_initialized;
81
82 #define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \
83 "%s(): static key '%pS' used before call to jump_label_init()", \
84 __func__, (key))
85
/*
 * DEPRECATED raw key type -- prefer the DEFINE_STATIC_KEY_{TRUE,FALSE}()
 * wrappers declared below.  ->enabled is a count: the key reads as true
 * while enabled > 0 (see static_key_count() users).
 */
struct static_key {
	atomic_t enabled;
#ifdef CONFIG_JUMP_LABEL
/*
 * Low two bits of the union pointer/word encode metadata:
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
#endif	/* CONFIG_JUMP_LABEL */
};
102
103 #endif /* __ASSEMBLY__ */
104
105 #ifdef CONFIG_JUMP_LABEL
106 #include <asm/jump_label.h>
107
108 #ifndef __ASSEMBLY__
109 #ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
110
/*
 * Relative-encoding patch-site record: each field holds a signed offset
 * from its own address (see jump_entry_code()/target()/key() below),
 * keeping the table small and position independent.
 */
struct jump_entry {
	s32 code;	/* self-relative offset to the patched instruction */
	s32 target;	/* self-relative offset to the jump destination */
	long key;	// key may be far away from the core kernel under KASLR
};
116
jump_entry_code(const struct jump_entry * entry)117 static inline unsigned long jump_entry_code(const struct jump_entry *entry)
118 {
119 return (unsigned long)&entry->code + entry->code;
120 }
121
jump_entry_target(const struct jump_entry * entry)122 static inline unsigned long jump_entry_target(const struct jump_entry *entry)
123 {
124 return (unsigned long)&entry->target + entry->target;
125 }
126
/*
 * Resolve the absolute address of the owning static_key.  The low two
 * bits of ->key carry flags (see jump_entry_is_branch()/is_init()), so
 * they are masked off before applying the self-relative offset.
 */
static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}
133
134 #else
135
/* Absolute-encoding variant: ->code already holds the instruction address. */
static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}
140
/* Absolute-encoding variant: ->target already holds the destination address. */
static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}
145
/*
 * Absolute-encoding variant: ->key is the static_key pointer with flag
 * bits OR'ed into its low two bits; strip them before dereferencing.
 */
static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}
150
151 #endif
152
/*
 * Bit 0 of ->key encodes the branch type at this site:
 * 1 = likely (static_branch_likely), 0 = unlikely.
 */
static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}
157
/*
 * Bit 2 flag of ->key, maintained via jump_entry_set_init()
 * (presumably marks entries in init text -- confirm in kernel/jump_label.c).
 */
static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}
162
/* Set or clear the "init" flag (value 2) in ->key's low bits. */
static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
	entry->key = set ? (entry->key | 2) : (entry->key & ~2);
}
170
/*
 * Size in bytes of the patchable instruction: a fixed per-arch constant
 * when the arch defines JUMP_LABEL_NOP_SIZE, otherwise queried per entry.
 */
static inline int jump_entry_size(struct jump_entry *entry)
{
#ifdef JUMP_LABEL_NOP_SIZE
	return JUMP_LABEL_NOP_SIZE;
#else
	return arch_jump_entry_size(entry);
#endif
}
179
180 #endif
181 #endif
182
183 #ifndef __ASSEMBLY__
184
/* Desired state of a patch site: NOP (fall through) or JMP (branch taken). */
enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};
189
190 struct module;
191
192 #ifdef CONFIG_JUMP_LABEL
193
194 #define JUMP_TYPE_FALSE 0UL
195 #define JUMP_TYPE_TRUE 1UL
196 #define JUMP_TYPE_LINKED 2UL
197 #define JUMP_TYPE_MASK 3UL
198
/*
 * DEPRECATED -- use static_branch_unlikely() instead.  Emits a NOP by
 * default (see the type/branch table below), patched to a JMP when enabled.
 */
static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}
203
/* DEPRECATED -- use static_branch_likely() instead. */
static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}
208
209 extern struct jump_entry __start___jump_table[];
210 extern struct jump_entry __stop___jump_table[];
211
212 extern void jump_label_init(void);
213 extern void jump_label_init_ro(void);
214 extern void jump_label_lock(void);
215 extern void jump_label_unlock(void);
216 extern void arch_jump_label_transform(struct jump_entry *entry,
217 enum jump_label_type type);
218 extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
219 enum jump_label_type type);
220 extern void arch_jump_label_transform_apply(void);
221 extern int jump_label_text_reserved(void *start, void *end);
222 extern bool static_key_slow_inc(struct static_key *key);
223 extern bool static_key_fast_inc_not_disabled(struct static_key *key);
224 extern void static_key_slow_dec(struct static_key *key);
225 extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
226 extern void static_key_slow_dec_cpuslocked(struct static_key *key);
227 extern int static_key_count(struct static_key *key);
228 extern void static_key_enable(struct static_key *key);
229 extern void static_key_disable(struct static_key *key);
230 extern void static_key_enable_cpuslocked(struct static_key *key);
231 extern void static_key_disable_cpuslocked(struct static_key *key);
232 extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
233
234 #define STATIC_KEY_INIT_TRUE \
235 { .enabled = ATOMIC_INIT(1), \
236 .type = JUMP_TYPE_TRUE }
237 #define STATIC_KEY_INIT_FALSE \
238 { .enabled = ATOMIC_INIT(0), \
239 .type = JUMP_TYPE_FALSE }
240
241 #else /* !CONFIG_JUMP_LABEL */
242
243 #include <linux/atomic.h>
244 #include <linux/bug.h>
245
/* Without CONFIG_JUMP_LABEL a key is just a plain atomic counter. */
static __always_inline int static_key_count(struct static_key *key)
{
	return raw_atomic_read(&key->enabled);
}
250
/* No code patching to set up; just record that initialization happened. */
static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}
255
/* Nothing to mark read-only without a jump table. */
static __always_inline void jump_label_init_ro(void) { }
257
static_key_false(struct static_key * key)258 static __always_inline bool static_key_false(struct static_key *key)
259 {
260 if (unlikely_notrace(static_key_count(key) > 0))
261 return true;
262 return false;
263 }
264
static_key_true(struct static_key * key)265 static __always_inline bool static_key_true(struct static_key *key)
266 {
267 if (likely_notrace(static_key_count(key) > 0))
268 return true;
269 return false;
270 }
271
/*
 * Take a reference on the key unless it has been put into the disabled
 * (negative) state.  Returns true on success, false if disabled or if
 * the increment would overflow into the negative range.
 */
static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Prevent key->enabled getting negative to follow the same semantics
	 * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
	 */
	v = atomic_read(&key->enabled);
	do {
		/* Disabled keys are negative; also guard v + 1 overflow. */
		if (v < 0 || (v + 1) < 0)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
	return true;
}
288 #define static_key_slow_inc(key) static_key_fast_inc_not_disabled(key)
289
/* Drop one reference; no code patching happens in this fallback. */
static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}
295
296 #define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
297 #define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
298
/* No patch sites exist, so no text range is ever reserved. */
static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}
303
/* No jump table to serialize against; locking degenerates to no-ops. */
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
306
static_key_enable(struct static_key * key)307 static inline void static_key_enable(struct static_key *key)
308 {
309 STATIC_KEY_CHECK_USE(key);
310
311 if (atomic_read(&key->enabled) != 0) {
312 WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
313 return;
314 }
315 atomic_set(&key->enabled, 1);
316 }
317
static_key_disable(struct static_key * key)318 static inline void static_key_disable(struct static_key *key)
319 {
320 STATIC_KEY_CHECK_USE(key);
321
322 if (atomic_read(&key->enabled) != 1) {
323 WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
324 return;
325 }
326 atomic_set(&key->enabled, 0);
327 }
328
329 #define static_key_enable_cpuslocked(k) static_key_enable((k))
330 #define static_key_disable_cpuslocked(k) static_key_disable((k))
331
332 #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
333 #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
334
335 #endif /* CONFIG_JUMP_LABEL */
336
337 DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())
338
339 #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
340 #define jump_label_enabled static_key_enabled
341
342 /* -------------------------------------------------------------------------- */
343
344 /*
345 * Two type wrappers around static_key, such that we can use compile time
346 * type differentiation to emit the right code.
347 *
348 * All the below code is macros in order to play type games.
349 */
350
/* Wrapper encoding "initially true" in the type, for compile-time dispatch. */
struct static_key_true {
	struct static_key key;
};

/* Wrapper encoding "initially false" in the type, for compile-time dispatch. */
struct static_key_false {
	struct static_key key;
};
358
359 #define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
360 #define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
361
362 #define DEFINE_STATIC_KEY_TRUE(name) \
363 struct static_key_true name = STATIC_KEY_TRUE_INIT
364
365 #define DEFINE_STATIC_KEY_TRUE_RO(name) \
366 struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT
367
368 #define DECLARE_STATIC_KEY_TRUE(name) \
369 extern struct static_key_true name
370
371 #define DEFINE_STATIC_KEY_FALSE(name) \
372 struct static_key_false name = STATIC_KEY_FALSE_INIT
373
374 #define DEFINE_STATIC_KEY_FALSE_RO(name) \
375 struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT
376
377 #define DECLARE_STATIC_KEY_FALSE(name) \
378 extern struct static_key_false name
379
380 #define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \
381 struct static_key_true name[count] = { \
382 [0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \
383 }
384
385 #define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count) \
386 struct static_key_false name[count] = { \
387 [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
388 }
389
390 #define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name)
391 #define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name)
392 #define DEFINE_STATIC_KEY_MAYBE(cfg, name) \
393 __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)
394
395 #define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name)
396 #define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name)
397 #define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \
398 __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)
399
400 #define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name)
401 #define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name)
402 #define DECLARE_STATIC_KEY_MAYBE(cfg, name) \
403 __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)
404
405 extern bool ____wrong_branch_error(void);
406
407 #define static_key_enabled(x) \
408 ({ \
409 if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \
410 !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
411 !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
412 ____wrong_branch_error(); \
413 static_key_count((struct static_key *)x) > 0; \
414 })
415
416 #ifdef CONFIG_JUMP_LABEL
417
418 /*
419 * Combine the right initial value (type) with the right branch order
420 * to generate the desired result.
421 *
422 *
423 * type\branch| likely (1) | unlikely (0)
424 * -----------+-----------------------+------------------
425 * | |
426 * true (1) | ... | ...
427 * | NOP | JMP L
428 * | <br-stmts> | 1: ...
429 * | L: ... |
430 * | |
431 * | | L: <br-stmts>
432 * | | jmp 1b
433 * | |
434 * -----------+-----------------------+------------------
435 * | |
436 * false (0) | ... | ...
437 * | JMP L | NOP
438 * | <br-stmts> | 1: ...
439 * | L: ... |
440 * | |
441 * | | L: <br-stmts>
442 * | | jmp 1b
443 * | |
444 * -----------+-----------------------+------------------
445 *
446 * The initial value is encoded in the LSB of static_key::entries,
447 * type: 0 = false, 1 = true.
448 *
449 * The branch type is encoded in the LSB of jump_entry::key,
450 * branch: 0 = unlikely, 1 = likely.
451 *
452 * This gives the following logic table:
453 *
 * enabled	type	branch	  instruction
455 * -----------------------------+-----------
456 * 0 0 0 | NOP
457 * 0 0 1 | JMP
458 * 0 1 0 | NOP
459 * 0 1 1 | JMP
460 *
461 * 1 0 0 | JMP
462 * 1 0 1 | NOP
463 * 1 1 0 | JMP
464 * 1 1 1 | NOP
465 *
466 * Which gives the following functions:
467 *
468 * dynamic: instruction = enabled ^ branch
469 * static: instruction = type ^ branch
470 *
471 * See jump_label_type() / jump_label_init_type().
472 */
473
474 #define static_branch_likely(x) \
475 ({ \
476 bool branch; \
477 if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
478 branch = !arch_static_branch(&(x)->key, true); \
479 else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
480 branch = !arch_static_branch_jump(&(x)->key, true); \
481 else \
482 branch = ____wrong_branch_error(); \
483 likely_notrace(branch); \
484 })
485
486 #define static_branch_unlikely(x) \
487 ({ \
488 bool branch; \
489 if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
490 branch = arch_static_branch_jump(&(x)->key, false); \
491 else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
492 branch = arch_static_branch(&(x)->key, false); \
493 else \
494 branch = ____wrong_branch_error(); \
495 unlikely_notrace(branch); \
496 })
497
498 #else /* !CONFIG_JUMP_LABEL */
499
500 #define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key))
501 #define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key))
502
503 #endif /* CONFIG_JUMP_LABEL */
504
505 #define static_branch_maybe(config, x) \
506 (IS_ENABLED(config) ? static_branch_likely(x) \
507 : static_branch_unlikely(x))
508
509 /*
510 * Advanced usage; refcount, branch is enabled when: count != 0
511 */
512
513 #define static_branch_inc(x) static_key_slow_inc(&(x)->key)
514 #define static_branch_dec(x) static_key_slow_dec(&(x)->key)
515 #define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
516 #define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
517
518 /*
519 * Normal usage; boolean enable/disable.
520 */
521
522 #define static_branch_enable(x) static_key_enable(&(x)->key)
523 #define static_branch_disable(x) static_key_disable(&(x)->key)
524 #define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
525 #define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
526
527 #endif /* __ASSEMBLY__ */
528
529 #endif /* _LINUX_JUMP_LABEL_H */
530