/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_TLS_H
#define __ASMARM_TLS_H

#include <linux/compiler.h>
#include <asm/thread_info.h>

#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
	.endm

	.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
	mrc	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register
	@ TLS register update is deferred until return to user space
	mcr	p15, 0, \tpuser, c13, c0, 2	@ set the user r/w register
	str	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
	.endm

	.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
	ldr_va	\tmp1, elf_hwcap
	mov	\tmp2, #0xffff0fff
	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
	mrcne	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register
#ifndef CONFIG_SMP
	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
#endif
	mcrne	p15, 0, \tpuser, c13, c0, 2	@ set user r/w register
	strne	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
	.endm

	.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
	mov	\tmp1, #0xffff0fff
	str	\tp, [\tmp1, #-15]		@ set TLS value at 0xffff0ff0
	.endm
#endif

#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu			1
#define has_tls_reg		1
#define defer_tls_reg_update	0
#define switch_tls		switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu			0
#define has_tls_reg		(elf_hwcap & HWCAP_TLS)
#define defer_tls_reg_update	IS_ENABLED(CONFIG_SMP)
#define switch_tls		switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu			0
#define has_tls_reg		1
#define defer_tls_reg_update	1
#define switch_tls		switch_tls_v6k
#else
#define tls_emu			0
#define has_tls_reg		0
#define defer_tls_reg_update	0
#define switch_tls		switch_tls_software
#endif

#ifndef __ASSEMBLY__

static inline void set_tls(unsigned long val)
{
	struct thread_info *thread;

	thread = current_thread_info();

	thread->tp_value[0] = val;

	/*
	 * This code runs with preemption enabled and therefore must
	 * be reentrant with respect to switch_tls.
	 *
	 * We need to ensure ordering between the shadow state and the
	 * hardware state, so that we don't corrupt the hardware state
	 * with a stale shadow state during context switch.
	 *
	 * If we're preempted here, switch_tls will load TPIDRURO from
	 * thread_info upon resuming execution and the following mcr
	 * is merely redundant.
	 */
	barrier();

	if (!tls_emu) {
		if (has_tls_reg && !defer_tls_reg_update) {
			asm("mcr p15, 0, %0, c13, c0, 3"
			    : : "r" (val));
		} else if (!has_tls_reg) {
#ifdef CONFIG_KUSER_HELPERS
			/*
			 * User space must never try to access this
			 * directly.  Expect your app to break
			 * eventually if you do so.  The user helper
			 * at 0xffff0fe0 must be used instead.  (see
			 * entry-armv.S for details)
			 */
			*((unsigned int *)0xffff0ff0) = val;
#endif
		}

	}
}

static inline unsigned long get_tpuser(void)
{
	unsigned long reg = 0;

	if (has_tls_reg && !tls_emu)
		__asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));

	return reg;
}

static inline void set_tpuser(unsigned long val)
{
	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
	 * we need not update thread_info.
	 */
	if (has_tls_reg && !tls_emu) {
		asm("mcr p15, 0, %0, c13, c0, 2"
		    : : "r" (val));
	}
}

static inline void flush_tls(void)
{
	set_tls(0);
	set_tpuser(0);
}

#endif
#endif	/* __ASMARM_TLS_H */
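
#if 0
/*
 * Usage sketch, illustrative only and not part of this header: roughly how
 * the helpers above are driven elsewhere in arch/arm, with set_tls() called
 * from the __ARM_NR_set_tls handling in the traps code and flush_tls() from
 * flush_thread() on exec. The wrapper names below are invented for
 * illustration, and the block is wrapped in "#if 0" so it is never compiled.
 */
static int example_set_tls_handler(struct pt_regs *regs)
{
	/* Record the new TLS value in thread_info; set_tls() also updates
	 * the hardware or kuser-helper copy as appropriate. */
	set_tls(regs->ARM_r0);
	return 0;
}

static void example_flush_thread_tls(void)
{
	/* Clear both the user read-only (TPIDRURO) and user read/write
	 * (TPIDRURW) thread registers for a freshly exec'd task. */
	flush_tls();
}
#endif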