/*
 * ARM translation: M-profile NOCP special-case instructions
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "translate.h"
#include "translate-a32.h"

#include "decode-m-nocp.c.inc"

/*
 * Decode VLLDM and VLSTM are nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->op) {
        /*
         * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
         * to take the IMPDEF option to make memory accesses to the stack
         * slots that correspond to the D16-D31 registers (discarding
         * read data and writing UNKNOWN values), so for us the T2
         * encoding behaves identically to the T1 encoding.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return false;
        }
    } else {
        /*
         * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
         * This is currently architecturally impossible, but we add the
         * check to stay in line with the pseudocode. Note that we must
         * emit code for the UNDEF so it takes precedence over the NOCP.
         */
        if (dc_isar_feature(aa32_simd_r32, s)) {
            unallocated_encoding(s);
            return true;
        }
    }

    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        clear_eci_state(s);
        return true;
    }

    /* Rn holds the pointer to the save area; call the slow-path helper. */
    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }
    tcg_temp_free_i32(fptr);

    clear_eci_state(s);

    /* End the TB, because we have updated FP control bits */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

/*
 * VSCCLRM (v8.1M "Floating-point Secure Context Clear Multiple"):
 * zero the S registers in the range specified by the insn (and VPR
 * if MVE is present). Like VLLDM/VLSTM above, the UNDEF cases must
 * emit code so they take precedence over the m-nocp.decode NOCP
 * fallback.
 */
static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
{
    int btmreg, topreg;
    TCGv_i64 zero;
    TCGv_i32 aspen, sfpa;

    if (!dc_isar_feature(aa32_m_sec_state, s)) {
        /* Before v8.1M, fall through in decode to NOCP check */
        return false;
    }

    /* Explicitly UNDEF because this takes precedence over NOCP */
    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    s->eci_handled = true;

    if (!dc_isar_feature(aa32_vfp_simd, s)) {
        /* NOP if we have neither FP nor MVE */
        clear_eci_state(s);
        return true;
    }

    /*
     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
     * active floating point context so we must NOP (without doing
     * any lazy state preservation or the NOCP check).
     */
    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
    sfpa = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
    /* sfpa == 0 here means "ASPEN set and SFPA clear", i.e. NOP the insn */
    tcg_gen_or_i32(sfpa, sfpa, aspen);
    arm_gen_condlabel(s);
    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);

    if (s->fp_excp_el != 0) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    topreg = a->vd + a->imm - 1;
    btmreg = a->vd;

    /* Convert to Sreg numbers if the insn specified in Dregs */
    if (a->size == 3) {
        topreg = topreg * 2 + 1;
        btmreg *= 2;
    }

    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
        /* UNPREDICTABLE: we choose to undef */
        unallocated_encoding(s);
        return true;
    }

    /* Silently ignore requests to clear D16-D31 if they don't exist */
    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
        topreg = 31;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * Zero the Sregs from btmreg to topreg inclusive: use 64-bit
     * stores for aligned pairs, 32-bit stores for the odd register
     * at either end of the range.
     */
    zero = tcg_const_i64(0);
    if (btmreg & 1) {
        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
        btmreg++;
    }
    for (; btmreg + 1 <= topreg; btmreg += 2) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
    }
    if (btmreg == topreg) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
        btmreg++;
    }
    assert(btmreg == topreg + 1);
    if (dc_isar_feature(aa32_mve, s)) {
        TCGv_i32 z32 = tcg_const_i32(0);
        store_cpu_field(z32, v7m.vpr);
    }

    clear_eci_state(s);
    return true;
}

/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed. do_access is true to do the store,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
                               bool do_access);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary. do_access is true to do the store,
 * and false to skip it and only perform side-effects like base
 * register writeback.
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
                                  bool do_access);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;

/*
 * Perform the shared architecture checks for an M-profile FP sysreg
 * access to 'regno': feature-based UNDEFs and (except for FPCXT_NS)
 * the standard vfp_access_check(). See FPSysRegCheckResult for how
 * the caller should interpret the return value.
 */
static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}

static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    /* fpca == 0 after the OR exactly when ASPEN is set and FPCA is clear */
    tcg_gen_or_i32(fpca, fpca, aspen);
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
    tcg_temp_free_i32(aspen);
    tcg_temp_free_i32(fpca);
}

static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque, true);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        /* FPSCR mode bits may have changed, so look up the new TB */
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque, true);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGLabel *lab_active = gen_new_label();

        lab_end = gen_new_label();
        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /*
         * fpInactive case: write is a NOP, so only do side effects
         * like register writeback before we branch to end
         */
        loadfn(s, opaque, false);
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS writes
         * behave the same as FPCXT_S writes.
         */
        if (s->fp_excp_el) {
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        gen_preserve_fp_state(s);
    }
    /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque, true);
        sfpa = tcg_temp_new_i32();
        /* Bit [31] of the written value becomes CONTROL.SFPA */
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            loadfn(s, opaque, false);
            break;
        }
        tmp = loadfn(s, opaque, true);
        store_cpu_field(tmp, v7m.vpr);
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque, true);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}

static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp, true);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        /*
         * NOTE(review): this declaration shadows the function-scope
         * 'tmp'; behaviourally harmless (the outer 'tmp' is unused on
         * this path) but -Wshadow would flag it.
         */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp, true);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS
         * reads the same as FPCXT_S.
         */
        if (s->fp_excp_el) {
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        gen_preserve_fp_state(s);
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp, true);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            storefn(s, opaque, NULL, false);
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp, true);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp, true);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}

/*
 * storefn callback for VMRS: write the sysreg value to the GP register
 * Rt, or to the CPSR NZCV flags when Rt is 15. Frees 'value'.
 */
static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
                             bool do_access)
{
    arg_VMSR_VMRS *a = opaque;

    if (!do_access) {
        return;
    }

    if (a->rt == 15) {
        /* Set the 4 flag bits in the CPSR */
        gen_set_nzcv(value);
        tcg_temp_free_i32(value);
    } else {
        store_reg(s, a->rt, value);
    }
}

/*
 * loadfn callback for VMSR: the value to write comes from the GP
 * register Rt. Returns NULL when do_access is false (no side effects
 * to perform for this insn form).
 */
static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
{
    arg_VMSR_VMRS *a = opaque;

    if (!do_access) {
        return NULL;
    }
    return load_reg(s, a->rt);
}

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    /*
     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
     * FPSCR -> r15 is a special case which writes to the PSR flags;
     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
     * we only care about the top 4 bits of FPSCR there.
     */
    if (a->rt == 15) {
        if (a->l && a->reg == ARM_VFP_FPSCR) {
            a->reg = QEMU_VFP_FPSCR_NZCV;
        } else {
            return false;
        }
    }

    if (a->l) {
        /* VMRS, move FP system register to gp register */
        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
    } else {
        /* VMSR, move gp register to FP system register */
        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
    }
}

/*
 * storefn callback for VSTR sysreg: store 'value' to memory at
 * [Rn, #+/-imm] with optional pre/post indexing and base register
 * writeback; writeback must happen even when do_access is false.
 */
static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
                                bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

    /* Neither the store nor writeback is needed: nothing to do */
    if (!do_access && !a->w) {
        return;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
        tcg_temp_free_i32(value);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}

/*
 * loadfn callback for VLDR sysreg: load the value from memory at
 * [Rn, #+/-imm] with optional pre/post indexing and base register
 * writeback; writeback must happen even when do_access is false.
 */
static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
                                    bool do_access)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = NULL;

    if (!a->a) {
        offset = -offset;
    }

    /* Neither the load nor writeback is needed: nothing to do */
    if (!do_access && !a->w) {
        return NULL;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    if (do_access) {
        value = tcg_temp_new_i32();
        gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                        MO_UL | MO_ALIGN | s->be_data);
    }

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}

/* VLDR sysreg: v8.1M load from memory into an FP system register */
static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    /* Rn == 15 is UNPREDICTABLE; we choose to undef via NOCP fallback */
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
}

/* VSTR sysreg: v8.1M store of an FP system register to memory */
static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    /* Rn == 15 is UNPREDICTABLE; we choose to undef via NOCP fallback */
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
}

static bool trans_NOCP(DisasContext *s, arg_nocp *a)
{
    /*
     * Handle M-profile early check for disabled coprocessor:
     * all we need to do here is emit the NOCP exception if
     * the coprocessor is disabled. Otherwise we return false
     * and the real VFP/etc decode will handle the insn.
     */
    assert(arm_dc_feature(s, ARM_FEATURE_M));

    if (a->cp == 11) {
        a->cp = 10;
    }
    if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
        (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
        /* in v8.1M cp 8, 9, 14, 15 also are governed by the cp10 enable */
        a->cp = 10;
    }

    if (a->cp != 10) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), default_exception_el(s));
        return true;
    }

    if (s->fp_excp_el != 0) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    return false;
}

static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
{
    /* This range needs a coprocessor check for v8.1M and later only */
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    return trans_NOCP(s, a);
}