Lines Matching +full:key +full:- +full:code
1 // SPDX-License-Identifier: GPL-2.0-only
41 * Entries are sorted by key. in jump_label_cmp()
44 return -1; in jump_label_cmp()
50 * In the batching mode, entries should also be sorted by the code in jump_label_cmp()
55 return -1; in jump_label_cmp()
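
The two-level ordering these comparator fragments describe can be sketched in plain C. This is a hypothetical userspace rendition (struct and field names invented here), not the kernel's jump_label_cmp() itself:

#include <stdlib.h>

/* Hypothetical stand-in for the kernel's jump_entry. */
struct jump_entry_sketch {
	unsigned long code;	/* address of the patchable instruction */
	unsigned long target;	/* branch destination */
	unsigned long key;	/* owning static_key */
};

/* Primary sort by key; secondary sort by code address, so batching
 * mode sees all sites of one key as consecutive entries. */
static int jump_entry_cmp_sketch(const void *pa, const void *pb)
{
	const struct jump_entry_sketch *a = pa, *b = pb;

	if (a->key != b->key)
		return a->key < b->key ? -1 : 1;
	if (a->code != b->code)
		return a->code < b->code ? -1 : 1;
	return 0;
}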
65 long delta = (unsigned long)a - (unsigned long)b; in jump_label_swap()
70 jea->code = jeb->code - delta; in jump_label_swap()
71 jea->target = jeb->target - delta; in jump_label_swap()
72 jea->key = jeb->key - delta; in jump_label_swap()
74 jeb->code = tmp.code + delta; in jump_label_swap()
75 jeb->target = tmp.target + delta; in jump_label_swap()
76 jeb->key = tmp.key + delta; in jump_label_swap()
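
The delta arithmetic in jump_label_swap() only makes sense for relative entries, where each field stores an offset from the entry's own address; when two entries trade slots, their offsets must be rebased by the distance between the slots. A minimal sketch, assuming a hypothetical rel_entry type:

/* Hypothetical relative entry: each field is (absolute - &entry). */
struct rel_entry {
	long code, target, key;
};

static void swap_relative_sketch(struct rel_entry *a, struct rel_entry *b)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct rel_entry tmp = *a;

	/* b's contents move to slot a: same absolute targets, new base. */
	a->code   = b->code - delta;
	a->target = b->target - delta;
	a->key    = b->key - delta;

	/* a's saved contents move to slot b, rebased the other way. */
	b->code   = tmp.code + delta;
	b->target = tmp.target + delta;
	b->key    = tmp.key + delta;
}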
88 size = (((unsigned long)stop - (unsigned long)start) in jump_label_sort_entries()
93 static void jump_label_update(struct static_key *key);
104 int static_key_count(struct static_key *key) in static_key_count() argument
107 * -1 means the first static_key_slow_inc() is in progress. in static_key_count()
110 int n = atomic_read(&key->enabled); in static_key_count()
116 void static_key_slow_inc_cpuslocked(struct static_key *key) in static_key_slow_inc_cpuslocked() argument
120 STATIC_KEY_CHECK_USE(key); in static_key_slow_inc_cpuslocked()
128 * static_key_enabled(&key) for jumps to be updated properly. in static_key_slow_inc_cpuslocked()
130 * So give a special meaning to negative key->enabled: it sends in static_key_slow_inc_cpuslocked()
131 * static_key_slow_inc() down the slow path, and it is non-zero in static_key_slow_inc_cpuslocked()
135 for (v = atomic_read(&key->enabled); v > 0; v = v1) { in static_key_slow_inc_cpuslocked()
136 v1 = atomic_cmpxchg(&key->enabled, v, v + 1); in static_key_slow_inc_cpuslocked()
142 if (atomic_read(&key->enabled) == 0) { in static_key_slow_inc_cpuslocked()
143 atomic_set(&key->enabled, -1); in static_key_slow_inc_cpuslocked()
144 jump_label_update(key); in static_key_slow_inc_cpuslocked()
149 atomic_set_release(&key->enabled, 1); in static_key_slow_inc_cpuslocked()
151 atomic_inc(&key->enabled); in static_key_slow_inc_cpuslocked()
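
The comments above define a small protocol around key->enabled: positive counts increment locklessly, 0 means disabled, and -1 marks the first enable in progress so racing callers fall into the locked slow path. A C11 userspace sketch of just the lockless fast path (assumed semantics, hypothetical helper name):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true if the count was bumped on the fast path; false means
 * enabled was <= 0 and the caller must take the locked slow path. */
static bool fast_inc_sketch(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	while (v > 0) {
		/* On failure, v is reloaded and the guard re-checked,
		 * mirroring the kernel's cmpxchg retry loop. */
		if (atomic_compare_exchange_weak(enabled, &v, v + 1))
			return true;
	}
	return false;
}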
156 void static_key_slow_inc(struct static_key *key) in static_key_slow_inc() argument
159 static_key_slow_inc_cpuslocked(key); in static_key_slow_inc()
164 void static_key_enable_cpuslocked(struct static_key *key) in static_key_enable_cpuslocked() argument
166 STATIC_KEY_CHECK_USE(key); in static_key_enable_cpuslocked()
169 if (atomic_read(&key->enabled) > 0) { in static_key_enable_cpuslocked()
170 WARN_ON_ONCE(atomic_read(&key->enabled) != 1); in static_key_enable_cpuslocked()
175 if (atomic_read(&key->enabled) == 0) { in static_key_enable_cpuslocked()
176 atomic_set(&key->enabled, -1); in static_key_enable_cpuslocked()
177 jump_label_update(key); in static_key_enable_cpuslocked()
181 atomic_set_release(&key->enabled, 1); in static_key_enable_cpuslocked()
187 void static_key_enable(struct static_key *key) in static_key_enable() argument
190 static_key_enable_cpuslocked(key); in static_key_enable()
195 void static_key_disable_cpuslocked(struct static_key *key) in static_key_disable_cpuslocked() argument
197 STATIC_KEY_CHECK_USE(key); in static_key_disable_cpuslocked()
200 if (atomic_read(&key->enabled) != 1) { in static_key_disable_cpuslocked()
201 WARN_ON_ONCE(atomic_read(&key->enabled) != 0); in static_key_disable_cpuslocked()
206 if (atomic_cmpxchg(&key->enabled, 1, 0)) in static_key_disable_cpuslocked()
207 jump_label_update(key); in static_key_disable_cpuslocked()
212 void static_key_disable(struct static_key *key) in static_key_disable() argument
215 static_key_disable_cpuslocked(key); in static_key_disable()
220 static bool static_key_slow_try_dec(struct static_key *key) in static_key_slow_try_dec() argument
224 val = atomic_fetch_add_unless(&key->enabled, -1, 1); in static_key_slow_try_dec()
230 * key->enabled is in use by static_key_slow_inc(); a in static_key_slow_try_dec()
239 static void __static_key_slow_dec_cpuslocked(struct static_key *key) in __static_key_slow_dec_cpuslocked() argument
243 if (static_key_slow_try_dec(key)) in __static_key_slow_dec_cpuslocked()
247 if (atomic_dec_and_test(&key->enabled)) in __static_key_slow_dec_cpuslocked()
248 jump_label_update(key); in __static_key_slow_dec_cpuslocked()
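
static_key_slow_try_dec() leans on atomic_fetch_add_unless(&key->enabled, -1, 1): decrement unless this is the final reference, because the 1 -> 0 transition must patch code under the mutex. Open-coded as a hedged C11 sketch (the misuse cases the kernel warns about, enabled <= 0, are not handled here):

#include <stdatomic.h>
#include <stdbool.h>

static bool slow_try_dec_sketch(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	do {
		if (v == 1)
			return false;	/* last ref: caller patches under lock */
	} while (!atomic_compare_exchange_weak(enabled, &v, v - 1));

	return true;			/* count dropped, no patching needed */
}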
252 static void __static_key_slow_dec(struct static_key *key) in __static_key_slow_dec() argument
255 __static_key_slow_dec_cpuslocked(key); in __static_key_slow_dec()
261 struct static_key_deferred *key = in jump_label_update_timeout() local
263 __static_key_slow_dec(&key->key); in jump_label_update_timeout()
267 void static_key_slow_dec(struct static_key *key) in static_key_slow_dec() argument
269 STATIC_KEY_CHECK_USE(key); in static_key_slow_dec()
270 __static_key_slow_dec(key); in static_key_slow_dec()
274 void static_key_slow_dec_cpuslocked(struct static_key *key) in static_key_slow_dec_cpuslocked() argument
276 STATIC_KEY_CHECK_USE(key); in static_key_slow_dec_cpuslocked()
277 __static_key_slow_dec_cpuslocked(key); in static_key_slow_dec_cpuslocked()
280 void __static_key_slow_dec_deferred(struct static_key *key, in __static_key_slow_dec_deferred() argument
284 STATIC_KEY_CHECK_USE(key); in __static_key_slow_dec_deferred()
286 if (static_key_slow_try_dec(key)) in __static_key_slow_dec_deferred()
293 void __static_key_deferred_flush(void *key, struct delayed_work *work) in __static_key_deferred_flush() argument
295 STATIC_KEY_CHECK_USE(key); in __static_key_deferred_flush()
300 void jump_label_rate_limit(struct static_key_deferred *key, in jump_label_rate_limit() argument
303 STATIC_KEY_CHECK_USE(key); in jump_label_rate_limit()
304 key->timeout = rl; in jump_label_rate_limit()
305 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); in jump_label_rate_limit()
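
A plausible caller-side pattern for the rate-limit API above, assuming a hypothetical key named my_key: enables take effect immediately, while disables are funneled through key->work so bursts of inc/dec patch the code at most once per timeout.

#include <linux/jump_label_ratelimit.h>
#include <linux/jiffies.h>

static struct static_key_deferred my_key;	/* hypothetical example key */

static void my_key_setup(void)
{
	jump_label_rate_limit(&my_key, HZ);	/* batch disables within ~1s */
}

static void my_key_get(void)
{
	static_key_slow_inc(&my_key.key);	/* enable patches immediately */
}

static void my_key_put(void)
{
	static_key_slow_dec_deferred(&my_key);	/* disable deferred via work */
}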
334 * Update code which is definitely not currently executing.
336 * running code can override this to make the non-live update case
345 static inline struct jump_entry *static_key_entries(struct static_key *key) in static_key_entries() argument
347 WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED); in static_key_entries()
348 return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK); in static_key_entries()
351 static inline bool static_key_type(struct static_key *key) in static_key_type() argument
353 return key->type & JUMP_TYPE_TRUE; in static_key_type()
356 static inline bool static_key_linked(struct static_key *key) in static_key_linked() argument
358 return key->type & JUMP_TYPE_LINKED; in static_key_linked()
361 static inline void static_key_clear_linked(struct static_key *key) in static_key_clear_linked() argument
363 key->type &= ~JUMP_TYPE_LINKED; in static_key_clear_linked()
366 static inline void static_key_set_linked(struct static_key *key) in static_key_set_linked() argument
368 key->type |= JUMP_TYPE_LINKED; in static_key_set_linked()
380 static void static_key_set_entries(struct static_key *key, in static_key_set_entries() argument
386 type = key->type & JUMP_TYPE_MASK; in static_key_set_entries()
387 key->entries = entries; in static_key_set_entries()
388 key->type |= type; in static_key_set_entries()
393 struct static_key *key = jump_entry_key(entry); in jump_label_type() local
394 bool enabled = static_key_enabled(key); in jump_label_type()
404 * Cannot update code that was in an init text area. in jump_label_can_update()
420 static void __jump_label_update(struct static_key *key, in __jump_label_update() argument
425 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { in __jump_label_update()
431 static void __jump_label_update(struct static_key *key, in __jump_label_update() argument
436 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { in __jump_label_update()
457 struct static_key *key = NULL; in jump_label_init() local
487 if (iterk == key) in jump_label_init()
490 key = iterk; in jump_label_init()
491 static_key_set_entries(key, iter); in jump_label_init()
502 struct static_key *key = jump_entry_key(entry); in jump_label_init_type() local
503 bool type = static_key_type(key); in jump_label_init_type()
516 static inline struct static_key_mod *static_key_mod(struct static_key *key) in static_key_mod() argument
518 WARN_ON_ONCE(!static_key_linked(key)); in static_key_mod()
519 return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK); in static_key_mod()
523 * key->type and key->next are the same via union.
524 * This sets key->next and preserves the type bits.
528 static void static_key_set_mod(struct static_key *key, in static_key_set_mod() argument
534 type = key->type & JUMP_TYPE_MASK; in static_key_set_mod()
535 key->next = mod; in static_key_set_mod()
536 key->type |= type; in static_key_set_mod()
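
static_key_set_entries() and static_key_set_mod() rely on the same tagged-pointer trick: entries and next alias one word, and the low bits (JUMP_TYPE_TRUE, JUMP_TYPE_LINKED) ride along across pointer updates. A userspace sketch with an assumed 2-bit mask:

#include <stdint.h>

#define TYPE_MASK_SKETCH 3UL	/* assumed: low bits carry TRUE | LINKED */

/* Store a new (4-byte-or-better aligned) pointer while preserving the
 * flag bits packed into the word's low bits. */
static uintptr_t set_ptr_keep_type(uintptr_t word, void *ptr)
{
	uintptr_t type = word & TYPE_MASK_SKETCH;

	return (uintptr_t)ptr | type;
}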
554 ret = __jump_label_text_reserved(mod->jump_entries, in __jump_label_mod_text_reserved()
555 mod->jump_entries + mod->num_jump_entries, in __jump_label_mod_text_reserved()
563 static void __jump_label_mod_update(struct static_key *key) in __jump_label_mod_update() argument
567 for (mod = static_key_mod(key); mod; mod = mod->next) { in __jump_label_mod_update()
575 if (!mod->entries) in __jump_label_mod_update()
578 m = mod->mod; in __jump_label_mod_update()
582 stop = m->jump_entries + m->num_jump_entries; in __jump_label_mod_update()
583 __jump_label_update(key, mod->entries, stop, in __jump_label_mod_update()
584 m && m->state == MODULE_STATE_COMING); in __jump_label_mod_update()
589 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
592 * Allow for run-time selection of the optimal nops. Before the module
594 * the arch specific jump label code.
598 struct jump_entry *iter_start = mod->jump_entries; in jump_label_apply_nops()
599 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; in jump_label_apply_nops()
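
The loop body of jump_label_apply_nops() is elided by the match filter; a hedged reconstruction of its likely shape (a sketch, not verbatim kernel code), using jump_label_type() from this file and the arch hook the comment names:

static void apply_nops_sketch(struct module *mod)
{
	struct jump_entry *iter = mod->jump_entries;
	struct jump_entry *stop = iter + mod->num_jump_entries;

	for (; iter < stop; iter++) {
		/* Only sites that should currently be nops get the
		 * arch's preferred nop installed before the module runs. */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}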
615 struct jump_entry *iter_start = mod->jump_entries; in jump_label_add_module()
616 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; in jump_label_add_module()
618 struct static_key *key = NULL; in jump_label_add_module() local
634 if (iterk == key) in jump_label_add_module()
637 key = iterk; in jump_label_add_module()
638 if (within_module((unsigned long)key, mod)) { in jump_label_add_module()
639 static_key_set_entries(key, iter); in jump_label_add_module()
644 return -ENOMEM; in jump_label_add_module()
645 if (!static_key_linked(key)) { in jump_label_add_module()
650 return -ENOMEM; in jump_label_add_module()
653 jlm2->mod = __module_address((unsigned long)key); in jump_label_add_module()
655 jlm2->entries = static_key_entries(key); in jump_label_add_module()
656 jlm2->next = NULL; in jump_label_add_module()
657 static_key_set_mod(key, jlm2); in jump_label_add_module()
658 static_key_set_linked(key); in jump_label_add_module()
660 jlm->mod = mod; in jump_label_add_module()
661 jlm->entries = iter; in jump_label_add_module()
662 jlm->next = static_key_mod(key); in jump_label_add_module()
663 static_key_set_mod(key, jlm); in jump_label_add_module()
664 static_key_set_linked(key); in jump_label_add_module()
668 __jump_label_update(key, iter, iter_stop, true); in jump_label_add_module()
676 struct jump_entry *iter_start = mod->jump_entries; in jump_label_del_module()
677 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; in jump_label_del_module()
679 struct static_key *key = NULL; in jump_label_del_module() local
683 if (jump_entry_key(iter) == key) in jump_label_del_module()
686 key = jump_entry_key(iter); in jump_label_del_module()
688 if (within_module((unsigned long)key, mod)) in jump_label_del_module()
692 if (WARN_ON(!static_key_linked(key))) in jump_label_del_module()
695 prev = &key->next; in jump_label_del_module()
696 jlm = static_key_mod(key); in jump_label_del_module()
698 while (jlm && jlm->mod != mod) { in jump_label_del_module()
699 prev = &jlm->next; in jump_label_del_module()
700 jlm = jlm->next; in jump_label_del_module()
707 if (prev == &key->next) in jump_label_del_module()
708 static_key_set_mod(key, jlm->next); in jump_label_del_module()
710 *prev = jlm->next; in jump_label_del_module()
714 jlm = static_key_mod(key); in jump_label_del_module()
716 if (jlm->next == NULL) { in jump_label_del_module()
717 static_key_set_entries(key, jlm->entries); in jump_label_del_module()
718 static_key_clear_linked(key); in jump_label_del_module()
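
The unlink in jump_label_del_module() is the classic indirect-pointer idiom: prev tracks the slot that points at the current node, so removal is a single store, with only the static_key_set_mod() handoff as a head-node special case. A generic sketch with a hypothetical node type:

struct node_sketch {
	struct node_sketch *next;
	const void *mod;
};

static void unlink_sketch(struct node_sketch **head, const void *mod)
{
	struct node_sketch **prev = head;
	struct node_sketch *n = *head;

	while (n && n->mod != mod) {
		prev = &n->next;	/* remember the slot pointing at n */
		n = n->next;
	}
	if (n)
		*prev = n->next;	/* splice out in O(1), no back-walk */
}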
767 * jump_label_text_reserved - check if addr range is reserved
772 * overlaps with any of the jump label patch addresses. Code
793 static void jump_label_update(struct static_key *key) in jump_label_update() argument
800 if (static_key_linked(key)) { in jump_label_update()
801 __jump_label_mod_update(key); in jump_label_update()
806 mod = __module_address((unsigned long)key); in jump_label_update()
808 stop = mod->jump_entries + mod->num_jump_entries; in jump_label_update()
811 entry = static_key_entries(key); in jump_label_update()
814 __jump_label_update(key, entry, stop, in jump_label_update()
827 WARN_ON(static_key_enabled(&sk_true.key) != true); in jump_label_test()
828 WARN_ON(static_key_enabled(&sk_false.key) != false); in jump_label_test()
838 WARN_ON(static_key_enabled(&sk_true.key) == true); in jump_label_test()
839 WARN_ON(static_key_enabled(&sk_false.key) == false); in jump_label_test()
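
For reference, a minimal consumer-side sketch of the machinery the self-test above exercises, using the public static-branch API with a hypothetical default-false key:

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(debug_path);	/* hypothetical key */

static void hot_path(void)
{
	/* Compiles to a nop until the key is enabled and patched. */
	if (static_branch_unlikely(&debug_path))
		pr_info("debug path taken\n");
}

static void enable_debug(void)
{
	static_branch_enable(&debug_path);	/* nop becomes a jump */
}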