/*
 * Software MMU support (per-target)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *    u   : unsigned
 *    s   : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8 bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 *
 * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
 * MemOp including alignment requirements.  The alignment will be enforced.
 */
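
/*
 * For example, decoding one name from the grid above:
 * cpu_ldsw_be_data_ra() is a signed ("s") 16-bit ("w") big-endian
 * ("_be") load through the "data" mmu index, taking an explicit host
 * return address for unwinding.  A call from a target op helper might
 * look like (GETPC() supplies the host return address):
 *
 *     int val = cpu_ldsw_be_data_ra(env, ptr, GETPC());
 */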
#ifndef CPU_LDST_H
#define CPU_LDST_H

#ifndef CONFIG_TCG
#error Can only include this header with TCG
#endif

#include "exec/cpu-ldst-common.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/abi_ptr.h"

#if defined(CONFIG_USER_ONLY)
#include "user/guest-host.h"
#endif /* CONFIG_USER_ONLY */

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
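
/*
 * Illustrative sketch only (helper_load_u64 is a made-up name): from
 * inside a target op helper, pass GETPC() as the return address so
 * that a faulting access can unwind back to the calling translated
 * block.  The plain _data forms omit the return address and are for
 * use outside that context:
 *
 *     uint64_t helper_load_u64(CPUArchState *env, abi_ptr addr)
 *     {
 *         return cpu_ldq_data_ra(env, addr, GETPC());
 *     }
 */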
static inline uint32_t
cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    return cpu_ldb_mmu(env, addr, oi, ra);
}

static inline int
cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra)
{
    return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
}

static inline uint32_t
cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                      int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
    return cpu_ldw_mmu(env, addr, oi, ra);
}

static inline int
cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                      int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
}

static inline uint32_t
cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
    return cpu_ldl_mmu(env, addr, oi, ra);
}

static inline uint64_t
cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
    return cpu_ldq_mmu(env, addr, oi, ra);
}

static inline uint32_t
cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                      int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
    return cpu_ldw_mmu(env, addr, oi, ra);
}

static inline int
cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                      int mmu_idx, uintptr_t ra)
{
    return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
}

static inline uint32_t
cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
    return cpu_ldl_mmu(env, addr, oi, ra);
}

static inline uint64_t
cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
    return cpu_ldq_mmu(env, addr, oi, ra);
}

static inline void
cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                  int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    cpu_stb_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
    cpu_stw_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
    cpu_stl_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
    cpu_stq_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
    cpu_stw_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
    cpu_stl_mmu(env, addr, val, oi, ra);
}

static inline void
cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                     int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
    cpu_stq_mmu(env, addr, val, oi, ra);
}
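
/*
 * Note that the _mmuidx_ra wrappers above all pass MO_UNALN, so no
 * alignment checking is performed.  To have alignment enforced, build
 * the MemOpIdx by hand and use the _mmu form.  An illustrative sketch
 * of an aligned target-endian 32-bit load:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx);
 *     uint32_t val = cpu_ldl_mmu(env, addr, oi, GETPC());
 */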
#if TARGET_BIG_ENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
#endif

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read, 1 for write, 2 for execute
 *               (MMU_DATA_LOAD, MMU_DATA_STORE, MMU_INST_FETCH)
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif
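
/*
 * Illustrative sketch only: probe for a direct host pointer and fall
 * back to the full load path when the page is not directly accessible:
 *
 *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     if (host) {
 *         val = ldl_le_p(host);                   // direct RAM access
 *     } else {
 *         val = cpu_ldl_le_mmuidx_ra(env, addr, mmu_idx, GETPC());
 *     }
 */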
344 */ 345 signal_barrier(); 346 } 347 348 static inline void clear_helper_retaddr(void) 349 { 350 /* 351 * Ensure that previous memory operations have succeeded before 352 * removing the data visible to the signal handler. 353 */ 354 signal_barrier(); 355 helper_retaddr = 0; 356 } 357 #else 358 #define set_helper_retaddr(ra) do { } while (0) 359 #define clear_helper_retaddr() do { } while (0) 360 #endif 361 362 #endif /* CPU_LDST_H */ 363