/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasIAQE {
    /* IASQ; may be null for no change from TB. */
    TCGv_i64 space;
    /* IAOQ base; may be null for relative address. */
    TCGv_i64 base;
    /* IAOQ addend; if base is null, relative to cpu_iaoq_f. */
    int64_t disp;
} DisasIAQE;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    uint32_t insn;
    bool set_iir;
    int8_t set_n;
    uint8_t excp;
    /* Saved state at parent insn. */
    DisasIAQE iaq_f, iaq_b;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /* IAQ_Front, IAQ_Back. */
    DisasIAQE iaq_f, iaq_b;
    /* IAQ_Next, for jumps, otherwise null for simple advance. */
    DisasIAQE iaq_j, *iaq_n;

    /* IAOQ_Front at entry to TB. */
    uint64_t iaoq_first;
    uint64_t gva_offset_mask;

    DisasCond null_cond;
    TCGLabel *null_lab;

    DisasDelayException *delay_excp_list;
    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    uint32_t psw_xb;
    bool psw_n_nonzero;
    bool psw_b_next;
    bool is_pa20;
    bool insn_start_updated;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif
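
/*
 * The expand_* and assemble_* helpers that follow are referenced by
 * the generated decoder (decode-insns.c.inc, built by decodetree from
 * insns.decode) to transform immediate fields at decode time.
 */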

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}

/* An inverted space register indicates that 0 means sr0, not a space
   inferred from the base register. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre- or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}

/* Expander for assemble_16a(s,cat(im10a,0),i). */
static int expand_11a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [4:15].
     * Swizzle things around depending on PSW.W.
     */
    int im10a = extract32(val, 1, 10);
    int s = extract32(val, 11, 2);
    int i = (-(val & 1) << 13) | (im10a << 3);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16a(s,im11a,i). */
static int expand_12a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [3:15].
     * Swizzle things around depending on PSW.W.
     */
    int im11a = extract32(val, 1, 11);
    int s = extract32(val, 12, 2);
    int i = (-(val & 1) << 13) | (im11a << 2);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16(s,im14). */
static int expand_16(DisasContext *ctx, int val)
{
    /*
     * @val is bits [0:15], containing both im14 and s.
     * Swizzle things around depending on PSW.W.
     */
    int s = extract32(val, 14, 2);
    int i = (-(val & 1) << 13) | extract32(val, 1, 13);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
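
/*
 * In the three expanders above, when PSW.W is set the two space-selector
 * bits are folded into displacement bits [14:13] by XOR with the
 * replicated sign bit, forming the wide-mode 16-bit displacement; with
 * PSW.W clear, s only selects the space register and the displacement
 * keeps its narrow, sign-extended form.
 */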

/* The sp field is only present with !PSW_W. */
static int sp0_if_wide(DisasContext *ctx, int sp)
{
    return ctx->tb_flags & PSW_W ? 0 : sp;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}

/*
 * In many places pa1.x did not decode the bit that later became
 * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
 */
static int pa20_d(DisasContext *ctx, int val)
{
    return ctx->is_pa20 & val;
}

/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit. */
#define DISAS_IAQ_N_UPDATED     DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE       DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts. */
#define DISAS_IAQ_N_STALE_EXIT  DISAS_TARGET_2
#define DISAS_EXIT              DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;
static TCGv_i32 cpu_psw_xb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_psw_xb = tcg_global_mem_new_i32(tcg_env,
                                        offsetof(CPUHPPAState, psw_xb),
                                        "psw_xb");
    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tt(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_ti(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    return cond_make_tt(c, a0, tcg_constant_i64(imm));
}

static DisasCond cond_make_vi(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_ti(c, tmp, imm);
}

static DisasCond cond_make_vv(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tt(c, t0, t1);
}
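
/*
 * A note on the cond_make_* suffixes above: 't' operands are used as
 * given (the caller owns the temp), 'v' operands are first copied into
 * a fresh temp so that later writes to the source do not retroactively
 * change the condition, and 'i' operands are immediate constants.
 */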

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
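
/*
 * TB_FLAG_SR_SAME, tested in load_spr above, records that SR4-SR7 all
 * held the same value when the TB was translated, so the cached srH
 * global can stand in for any of them.
 */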

/*
 * Write a value to psw_xb, bearing in mind the known value.
 * To be used just before exiting the TB, so do not update the known value.
 */
static void store_psw_xb(DisasContext *ctx, uint32_t xb)
{
    tcg_debug_assert(xb == 0 || xb == PSW_B);
    if (ctx->psw_xb != xb) {
        tcg_gen_movi_i32(cpu_psw_xb, xb);
    }
}

/* Write a value to psw_xb, and update the known value. */
static void set_psw_xb(DisasContext *ctx, uint32_t xb)
{
    store_psw_xb(ctx, xb);
    ctx->psw_xb = xb;
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        ctx->null_cond = cond_make_f();
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    ctx->null_cond = cond_make_f();
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}
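
/*
 * Typical bracketing of a translate function that is too complex for
 * save_or_nullify's conditional move:
 *
 *     nullify_over(ctx);        -- branch around the body if nullified
 *     ... emit the operation ...
 *     return nullify_end(ctx);  -- resolve the label; always true
 */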

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function. */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path. */
    assert(status != DISAS_IAQ_N_UPDATED);
    /* Taken branches are handled manually. */
    assert(!ctx->psw_b_next);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn. */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that. */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static bool iaqe_variable(const DisasIAQE *e)
{
    return e->base || e->space;
}

static DisasIAQE iaqe_incr(const DisasIAQE *e, int64_t disp)
{
    return (DisasIAQE){
        .space = e->space,
        .base = e->base,
        .disp = e->disp + disp,
    };
}

static DisasIAQE iaqe_branchi(DisasContext *ctx, int64_t disp)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .disp = ctx->iaq_f.disp + 8 + disp,
    };
}

static DisasIAQE iaqe_next_absv(DisasContext *ctx, TCGv_i64 var)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .base = var,
    };
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            const DisasIAQE *src)
{
    tcg_gen_addi_i64(dest, src->base ? : cpu_iaoq_f, src->disp);
}

static void install_iaq_entries(DisasContext *ctx, const DisasIAQE *f,
                                const DisasIAQE *b)
{
    DisasIAQE b_next;

    if (b == NULL) {
        b_next = iaqe_incr(f, 4);
        b = &b_next;
    }

    /*
     * There is an edge case
     *    bv   r0(rN)
     *    b,l  disp,r0
     * for which F will use cpu_iaoq_b (from the indirect branch),
     * and B will use cpu_iaoq_f (from the direct branch).
     * In this case we need an extra temporary.
     */
    if (f->base != cpu_iaoq_b) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
    } else if (f->base == b->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_addi_i64(cpu_iaoq_b, cpu_iaoq_f, b->disp - f->disp);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        copy_iaoq_entry(ctx, tmp, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_mov_i64(cpu_iaoq_b, tmp);
    }

    if (f->space) {
        tcg_gen_mov_i64(cpu_iasq_f, f->space);
    }
    if (b->space || f->space) {
        tcg_gen_mov_i64(cpu_iasq_b, b->space ? : f->space);
    }
}

static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
{
    tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
    if (!link) {
        return;
    }
    DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
    copy_iaoq_entry(ctx, cpu_gr[link], &next);
#ifndef CONFIG_USER_ONLY
    if (with_sr0) {
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
    }
#endif
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    install_iaq_entries(ctx, &ctx->iaq_f, &ctx->iaq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
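
/*
 * Delayed exceptions: when an insn that can trap may also be
 * nullified, the trap path branches out of line to e->lab, with the
 * IIR, PSW[N] and saved IAQ state recorded below to be materialized
 * once the main body of the TB is complete.
 */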

static DisasDelayException *delay_excp(DisasContext *ctx, uint8_t excp)
{
    DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));

    memset(e, 0, sizeof(*e));
    e->next = ctx->delay_excp_list;
    ctx->delay_excp_list = e;

    e->lab = gen_new_label();
    e->insn = ctx->insn;
    e->set_iir = true;
    e->set_n = ctx->psw_n_nonzero ? 0 : -1;
    e->excp = excp;
    e->iaq_f = ctx->iaq_f;
    e->iaq_b = ctx->iaq_b;

    return e;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                       tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
        gen_excp(ctx, exc);
    } else {
        DisasDelayException *e = delay_excp(ctx, exc);
        tcg_gen_brcond_i64(tcg_invert_cond(ctx->null_cond.c),
                           ctx->null_cond.a0, ctx->null_cond.a1, e->lab);
        ctx->null_cond = cond_make_f();
    }
    return true;
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
                        const DisasIAQE *b)
{
    return (!iaqe_variable(f) &&
            (b == NULL || !iaqe_variable(b)) &&
            translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
            && !iaqe_variable(&ctx->iaq_b)
            && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
                & TARGET_PAGE_MASK) == 0);
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        const DisasIAQE *f, const DisasIAQE *b)
{
    install_iaq_entries(ctx, f, b);
    if (use_goto_tb(ctx, f, b)) {
        tcg_gen_goto_tb(which);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}
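
/*
 * Throughout, cf packs the 3-bit condition c with the negation bit f
 * as (c << 1) | f.  For example, "add,=" is cf 2 (c=1, f=0), nullifying
 * when the result is zero, and "add,<>" is cf 3, the inverted sense.
 */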

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
    TCGCond sign_cond, zero_cond;
    uint64_t sign_imm, zero_imm;
    DisasCond cond;
    TCGv_i64 tmp;

    if (d) {
        /* 64-bit condition. */
        sign_imm = 0;
        sign_cond = TCG_COND_LT;
        zero_imm = 0;
        zero_cond = TCG_COND_EQ;
    } else {
        /* 32-bit condition. */
        sign_imm = 1ull << 31;
        sign_cond = TCG_COND_TSTNE;
        zero_imm = UINT32_MAX;
        zero_cond = TCG_COND_TSTEQ;
    }

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_vi(zero_cond, res, zero_imm);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        cond = cond_make_ti(sign_cond, tmp, sign_imm);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   ((res ^ sv) < 0 ? 1 : !res)
         *   !((res ^ sv) < 0 ? 0 : res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        tcg_gen_movcond_i64(sign_cond, tmp,
                            tmp, tcg_constant_i64(sign_imm),
                            ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 4: /* NUV / UV      (!UV / UV) */
        cond = cond_make_vi(TCG_COND_EQ, uv, 0);
        break;
    case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_vi(sign_cond, sv, sign_imm);
        break;
    case 7: /* OD / EV */
        cond = cond_make_vi(TCG_COND_TSTNE, res, 1);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused. */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (!d) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tt(tc, t1, t2);
    }
    return cond_make_vv(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    uint64_t imm;

    switch (cf >> 1) {
    case 0: /* never / always */
    case 4: /* undef, C */
    case 5: /* undef, C & !Z */
    case 6: /* undef, V */
        return cf & 1 ? cond_make_t() : cond_make_f();
    case 1: /* == / <> */
        tc = d ? TCG_COND_EQ : TCG_COND_TSTEQ;
        imm = d ? 0 : UINT32_MAX;
        break;
    case 2: /* < / >= */
        tc = d ? TCG_COND_LT : TCG_COND_TSTNE;
        imm = d ? 0 : 1ull << 31;
        break;
    case 3: /* <= / > */
        tc = cf & 1 ? TCG_COND_GT : TCG_COND_LE;
        if (!d) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, res);
            return cond_make_ti(tc, tmp, 0);
        }
        return cond_make_vi(tc, res, 0);
    case 7: /* OD / EV */
        tc = TCG_COND_TSTNE;
        imm = 1;
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    return cond_make_vi(tc, res, imm);
}
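
/*
 * Example: "and,<" (cf 4) reduces to a plain sign test of the 32- or
 * 64-bit result, because for logicals V is treated as 0 above and
 * N ^ V degenerates to N.
 */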

/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit zero conditions. */
static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
{
    TCGv_i64 tmp;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;
    uint64_t ones = 0, sgns = 0;

    switch (cf >> 1) {
    case 1: /* SBW / NBW */
        if (d) {
            ones = d_repl;
            sgns = d_repl << 31;
        }
        break;
    case 2: /* SBZ / NBZ */
        ones = d_repl * 0x01010101u;
        sgns = ones << 7;
        break;
    case 3: /* SHZ / NHZ */
        ones = d_repl * 0x00010001u;
        sgns = ones << 15;
        break;
    }
    if (ones == 0) {
        /* Undefined, or 0/1 (never/always). */
        return cf & 1 ? cond_make_t() : cond_make_f();
    }

    /*
     * See hasless(v,1) from
     * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
     */
    tmp = tcg_temp_new_i64();
    tcg_gen_subi_i64(tmp, res, ones);
    tcg_gen_andc_i64(tmp, tmp, res);

    return cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE, tmp, sgns);
}

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (!d) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}
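
/*
 * psw_cb is maintained as in1 ^ in2 ^ result, whose bit N is the carry
 * into bit N, with psw_cb_msb holding the carry out of bit 63; thus
 * bit 32 of the vector, extracted above, is the 32-bit carry out.
 */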

/* Compute signed overflow for addition. */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2,
                          TCGv_i64 orig_in1, int shift, bool d)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    switch (shift) {
    case 0:
        break;
    case 1:
        /* Shift left by one and compare the sign. */
        tcg_gen_add_i64(tmp, orig_in1, orig_in1);
        tcg_gen_xor_i64(tmp, tmp, orig_in1);
        /* Incorporate into the overflow. */
        tcg_gen_or_i64(sv, sv, tmp);
        break;
    default:
        {
            int sign_bit = d ? 63 : 31;

            /* Compare the sign against all lower bits. */
            tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
            tcg_gen_xor_i64(tmp, tmp, orig_in1);
            /*
             * If one of the bits shifting into or through the sign
             * differs, then we have overflow.
             */
            tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
            tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
                                tcg_constant_i64(-1), sv);
        }
    }
    return sv;
}

/* Compute unsigned overflow for addition. */
static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
                          TCGv_i64 in1, int shift, bool d)
{
    if (shift == 0) {
        return get_carry(ctx, d, cb, cb_msb);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
        tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
        return tmp;
    }
}

/* Compute signed overflow for subtraction. */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

static void gen_tc(DisasContext *ctx, DisasCond *cond)
{
    DisasDelayException *e;

    switch (cond->c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        gen_excp_iir(ctx, EXCP_COND);
        break;
    default:
        e = delay_excp(ctx, EXCP_COND);
        tcg_gen_brcond_i64(cond->c, cond->a0, cond->a1, e->lab);
        /* In the non-trap path, the condition is known false. */
        *cond = cond_make_f();
        break;
    }
}

static void gen_tsv(DisasContext *ctx, TCGv_i64 *sv, bool d)
{
    DisasCond cond = do_cond(ctx, /* SV */ 12, d, NULL, NULL, *sv);
    DisasDelayException *e = delay_excp(ctx, EXCP_OVERFLOW);

    tcg_gen_brcond_i64(cond.c, cond.a0, cond.a1, e->lab);

    /* In the non-trap path, V is known zero. */
    *sv = tcg_constant_i64(0);
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;

    in1 = orig_in1;
    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        if (is_c) {
            tcg_gen_addcio_i64(dest, cb_msb, in1, in2, get_psw_carry(ctx, d));
        } else {
            tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute unsigned overflow if required. */
    uv = NULL;
    if (cond_need_cb(c)) {
        uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(ctx, cf, d, dest, uv, sv);
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}
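
/*
 * Flag summary for do_add above: is_l suppresses the carry/borrow
 * writeback (the "logical" forms), is_c adds the PSW carry in, is_tsv
 * traps on signed overflow, is_tc traps on the computed condition,
 * and a nonzero shift implements the shift-and-add forms.
 */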

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* Unconditional trap on condition. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* Unconditional trap on condition. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_addcio_i64(dest, cb_msb, in1, cb, get_psw_carry(ctx, d));
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow. */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}
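
/*
 * PA-RISC records "no borrow" rather than "borrow" in the carry bits,
 * so subtract-with-borrow above computes IN1 + ~IN2 + C, and a plain
 * subtract computes IN1 + ~IN2 + 1, per the two's-complement identity.
 */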

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = do_log_cond(ctx, cf, d, dest);
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                           TCGv_i64 in2, unsigned cf, bool d,
                           bool is_tc, bool is_add)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    uint64_t test_cb = 0;
    DisasCond cond;

    /* Select which carry-out bits to test. */
    switch (cf >> 1) {
    case 4: /* NDC / SDC -- 4-bit carries */
        test_cb = dup_const(MO_8, 0x88);
        break;
    case 5: /* NWC / SWC -- 32-bit carries */
        if (d) {
            test_cb = dup_const(MO_32, INT32_MIN);
        } else {
            cf &= 1; /* undefined -- map to never/always */
        }
        break;
    case 6: /* NBC / SBC -- 8-bit carries */
        test_cb = dup_const(MO_8, INT8_MIN);
        break;
    case 7: /* NHC / SHC -- 16-bit carries */
        test_cb = dup_const(MO_16, INT16_MIN);
        break;
    }
    if (!d) {
        test_cb = (uint32_t)test_cb;
    }

    if (!test_cb) {
        /* No need to compute carries if we don't need to test them. */
        if (is_add) {
            tcg_gen_add_i64(dest, in1, in2);
        } else {
            tcg_gen_sub_i64(dest, in1, in2);
        }
        cond = do_unit_zero_cond(cf, d, dest);
    } else {
        TCGv_i64 cb = tcg_temp_new_i64();

        if (d) {
            TCGv_i64 cb_msb = tcg_temp_new_i64();
            if (is_add) {
                tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                /* See do_sub, !is_b. */
                TCGv_i64 one = tcg_constant_i64(1);
                tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
        } else {
            if (is_add) {
                tcg_gen_add_i64(dest, in1, in2);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                tcg_gen_sub_i64(dest, in1, in2);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_shri_i64(cb, cb, 1);
        }

        cond = cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                            cb, test_cb);
    }

    if (is_tc) {
        gen_tc(ctx, &cond);
    }
    save_gpr(ctx, rt, dest);

    ctx->null_cond = cond;
}
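
/*
 * For the unit conditions above, the carry-in vector is shifted so
 * that bit N becomes the carry out of bit N, and test_cb then masks
 * the digit, byte, halfword or word boundaries: the "S" forms test
 * for some carry, the "N" forms for none.
 */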

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP. */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
                     ctx->gva_offset_mask);
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
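
/*
 * In form_gva above, gva_offset_mask trims the offset according to
 * PSW.W, and for system mode the selected space value, which is kept
 * pre-shifted into the high bits, is OR'd in to produce the full
 * global virtual address.
 */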

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load. */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}
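
/*
 * FR0 holds the floating-point status register, so a load into it
 * changes rounding and exception state; hence the loaded_fr0 helper
 * calls in do_floadw/do_floadd above.
 */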

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}
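
/*
 * The do_fop_* suffixes spell out the helper signature: 'w' a 32-bit
 * word, 'd' a 64-bit double, 'e' the env argument; e.g. _dew wraps
 * double = func(env, word).
 */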

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, int64_t disp,
                       unsigned link, bool is_n)
{
    ctx->iaq_j = iaqe_branchi(ctx, disp);

    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        install_link(ctx, link, false);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                nullify_set(ctx, 0);
                store_psw_xb(ctx, 0);
                gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
                ctx->base.is_jmp = DISAS_NORETURN;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaq_n = &ctx->iaq_j;
        ctx->psw_b_next = true;
    } else {
        nullify_over(ctx);

        install_link(ctx, link, false);
        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            store_psw_xb(ctx, 0);
            gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
        } else {
            nullify_set(ctx, is_n);
            store_psw_xb(ctx, PSW_B);
            gen_goto_tb(ctx, 0, &ctx->iaq_b, &ctx->iaq_j);
        }
        nullify_end(ctx);

        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 1, &ctx->iaq_b, NULL);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    DisasIAQE next;
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches. */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, disp, 0, is_n && disp >= 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        next = iaqe_incr(&ctx->iaq_b, 4);
        gen_goto_tb(ctx, 0, &next, NULL);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 0, &ctx->iaq_b, NULL);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches. */
    n = is_n && disp >= 0;

    next = iaqe_branchi(ctx, disp);
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 1, &next, NULL);
    } else {
        nullify_set(ctx, n);
        store_psw_xb(ctx, PSW_B);
        gen_goto_tb(ctx, 1, &ctx->iaq_b, &next);
    }

    /* Not taken: the branch itself was nullified. */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
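
/*
 * Note the PA-RISC ",n" completer semantics implemented above: a
 * backward conditional branch nullifies its delay slot when not
 * taken, a forward one when taken.
 */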

/*
 * Emit an unconditional branch to an indirect target, in ctx->iaq_j.
 * This handles nullification of the branch itself.
 */
static bool do_ibranch(DisasContext *ctx, unsigned link,
                       bool with_sr0, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        install_link(ctx, link, with_sr0);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                install_iaq_entries(ctx, &ctx->iaq_j, NULL);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaq_n = &ctx->iaq_j;
        ctx->psw_b_next = true;
        return true;
    }

    nullify_over(ctx);

    install_link(ctx, link, with_sr0);
    if (is_n && use_nullify_skip(ctx)) {
        install_iaq_entries(ctx, &ctx->iaq_j, NULL);
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
    } else {
        install_iaq_entries(ctx, &ctx->iaq_b, &ctx->iaq_j);
        nullify_set(ctx, is_n);
        store_psw_xb(ctx, PSW_B);
    }

    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *      IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease. */
        tcg_gen_mov_i64(dest, offset);
        break;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase. */
        tcg_gen_ori_i64(dest, offset, 3);
        break;
    default:
        tcg_gen_andi_i64(dest, offset, -4);
        tcg_gen_ori_i64(dest, dest, ctx->privilege);
        tcg_gen_umax_i64(dest, dest, offset);
        break;
    }
    return dest;
}
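
/*
 * Example for do_ibranch_priv: at privilege 1, a target whose low two
 * bits request privilege 0 is forced up to 1, while a target already
 * at privilege 3 is kept by the umax.
 */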
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    assert(ctx->iaq_f.disp == 0);

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* If PSW[B] is set, the B,GATE insn would trap.  */
    if (ctx->psw_xb & PSW_B) {
        goto do_sigill;
    }

    switch (ctx->base.pc_first) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        {
            DisasIAQE next = { .base = tcg_temp_new_i64() };

            tcg_gen_st_i64(cpu_gr[26], tcg_env,
                           offsetof(CPUHPPAState, cr[27]));
            tcg_gen_ori_i64(next.base, cpu_gr[31], PRIV_USER);
            install_iaq_entries(ctx, &next, NULL);
            ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        }
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    TCGv_i64 dest = dest_gpr(ctx, a->t);

    copy_iaoq_entry(ctx, dest, &ctx->iaq_f);
    tcg_gen_andi_i64(dest, dest, -4);

    save_gpr(ctx, a->t, dest);
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);

    save_gpr(ctx, rt, t0);

    ctx->null_cond = cond_make_f();
    return true;
}
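/*
 * Space registers keep the 32-bit space identifier in bits 63:32 of
 * their 64-bit backing storage, which is why MFSP above extracts it
 * with a right shift of 32 and MTSP below installs it with a left
 * shift of 32.
 */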
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_i64 tmp;

    switch (ctl) {
    case CR_SAR:
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_read_interval_timer(tmp);
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 tmp;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);

    if (rs >= 4) {
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], tmp);
    }

    return nullify_end(ctx);
}

static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_i64 reg;
    TCGv_i64 tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
        save_or_nullify(ctx, cpu_sar, tmp);

        ctx->null_cond = cond_make_f();
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (ctx->is_pa20) {
        reg = load_gpr(ctx, a->r);
    } else {
        reg = tcg_temp_new_i64();
        tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
    }

    switch (ctl) {
    case CR_IT:
        if (translator_io_start(&ctx->base)) {
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_write_interval_timer(tcg_env, reg);
        break;
    case CR_EIRR:
        /* Helper modifies interrupt lines and is therefore IO.  */
        translator_io_start(&ctx->base);
        gen_helper_write_eirr(tcg_env, reg);
        /* Exit to re-evaluate interrupts in the main loop.  */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = tcg_temp_new_i64();
        tcg_gen_ld_i64(tmp, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_i64(reg, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(tcg_env);
#endif
        break;

    case CR_EIEM:
        /* Exit to re-evaluate interrupts in the main loop.  */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        /* FALLTHRU */
    default:
        tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}
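/*
 * SAR holds a shift amount, so its writers mask the value to 5 bits
 * on pa1.x (shifts of 0-31) and 6 bits on pa2.0 (shifts of 0-63),
 * both in MTCTL above and in MTSARCM below.
 */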
static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
    save_or_nullify(ctx, cpu_sar, tmp);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_i64 dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode.  */
    tcg_gen_movi_i64(dest, 0);
#else
    tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(dest, dest, 32);
#endif
    save_gpr(ctx, a->t, dest);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
#ifdef CONFIG_USER_ONLY
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#else
    TCGv_i64 tmp;

    /* HP-UX 11i and HP ODE use rsm for read-access to PSW */
    if (a->i) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    }

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_i64(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_i64(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();
    gen_helper_swap_system_mask(tmp, tcg_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
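/*
 * All three system-mask updates above funnel through
 * gen_helper_swap_system_mask, which installs the new mask bits and
 * hands back the previous PSW value: RSM clears bits (psw & ~i),
 * SSM sets them (psw | i), and MTSM replaces the mask wholesale.
 * The returned old value is what makes the RSM read-access idiom
 * noted above work.
 */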
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(tcg_env);
    } else {
        gen_helper_rfi(tcg_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}

static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}

static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    set_psw_xb(ctx, 0);
    nullify_over(ctx);
    gen_helper_halt(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    set_psw_xb(ctx, 0);
    nullify_over(ctx);
    gen_helper_reset(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool do_getshadowregs(DisasContext *ctx)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
    tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
    tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
    tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
    tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
    tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
    tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
    return nullify_end(ctx);
}

static bool do_putshadowregs(DisasContext *ctx)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
    tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
    tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
    tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
    tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
    tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
    tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
    return nullify_end(ctx);
}

static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    return do_getshadowregs(ctx);
}

static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
{
    if (a->m) {
        TCGv_i64 dest = dest_gpr(ctx, a->b);
        TCGv_i64 src1 = load_gpr(ctx, a->b);
        TCGv_i64 src2 = load_gpr(ctx, a->x);

        /* The only thing we need to do is the base register modification.  */
        tcg_gen_add_i64(dest, src1, src2);
        save_gpr(ctx, a->b, dest);
    }
    ctx->null_cond = cond_make_f();
    return true;
}
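/*
 * trans_nop_addrx doubles as the implementation of the cache and TLB
 * maintenance instructions below that are no-ops for QEMU apart from
 * their optional base register update.
 */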
static bool trans_fic(DisasContext *ctx, arg_ldst *a)
{
    /* End TB for flush instruction cache, so we pick up new insns.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    return trans_nop_addrx(ctx, a);
}

static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_i64 dest, ofs;
    TCGv_i32 level, want;
    TCGv_i64 addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    if (a->imm) {
        level = tcg_constant_i32(a->ri & 3);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, tcg_env, addr, level, want);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}

static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    /*
     * Page align now, rather than later, so that we can add in the
     * page_size field from pa2.0 from the low 4 bits of GR[b].
     */
    tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
    if (ctx->is_pa20) {
        tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
    }

    if (local) {
        gen_helper_ptlb_l(tcg_env, addr);
    } else {
        gen_helper_ptlb(tcg_env, addr);
    }

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
{
    return do_pxtlb(ctx, a, false);
}

static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
{
    return ctx->is_pa20 && do_pxtlb(ctx, a, true);
}

static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    trans_nop_addrx(ctx, a);
    gen_helper_ptlbe(tcg_env);

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
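/*
 * The "exit TB" after every TLB insert or purge above matters because
 * translated code effectively caches virtual-to-physical lookups:
 * once PSW[C] translation is enabled, the current TB must end so the
 * next fetch and memory access see the updated TLB contents.
 */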
/*
 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
 * See
 *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
 *     page 13-9 (195/206)
 */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr, atl, stl;
    TCGv_i64 reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *      return gen_illegal(ctx);
     */

    atl = tcg_temp_new_i64();
    stl = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(stl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_i64(addr, atl, stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
{
    if (!ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);
        TCGv_i64 src2 = load_gpr(ctx, a->r2);

        if (a->data) {
            gen_helper_idtlbt_pa20(tcg_env, src1, src2);
        } else {
            gen_helper_iitlbt_pa20(tcg_env, src1, src2);
        }
    }
    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 vaddr;
    TCGv_i64 ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new_i64();
    gen_helper_lpa(paddr, tcg_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);

    return nullify_end(ctx);
#endif
}
static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, ctx->zero);

    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}

static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}

static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_i64);
}

static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_i64);
}

static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            ctx->null_cond = cond_make_f();
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                TCGv_i64 dest = dest_gpr(ctx, rt);
                tcg_gen_movi_i64(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            ctx->null_cond = cond_make_f();
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */

            set_psw_xb(ctx, 0);

            nullify_over(ctx);

            /* Advance the instruction queue.  */
            install_iaq_entries(ctx, &ctx->iaq_b, NULL);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_i64);
}

static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_i64);
}

static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
    return nullify_end(ctx);
}

static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2, dest;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
    save_gpr(ctx, a->t, dest);

    ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
    return nullify_end(ctx);
}

static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2, tmp;

    if (a->cf == 0) {
        tcg_r2 = load_gpr(ctx, a->r2);
        tmp = dest_gpr(ctx, a->t);

        if (a->r1 == 0) {
            /* UADDCM r0,src,dst is the common idiom for dst = ~src.  */
            tcg_gen_not_i64(tmp, tcg_r2);
        } else {
            /*
             * Recall that r1 - r2 == r1 + ~r2 + 1.
             * Thus r1 + ~r2 == r1 - r2 - 1,
             * which does not require an extra temporary.
             */
            tcg_r1 = load_gpr(ctx, a->r1);
            tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
            tcg_gen_subi_i64(tmp, tmp, 1);
        }
        save_gpr(ctx, a->t, tmp);
        ctx->null_cond = cond_make_f();
        return true;
    }

    nullify_over(ctx);
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = tcg_temp_new_i64();
    tcg_gen_not_i64(tmp, tcg_r2);
    do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
    return nullify_end(ctx);
}

static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, false);
}

static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, true);
}

static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
    if (!is_i) {
        tcg_gen_not_i64(tmp, tmp);
    }
    tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
    tcg_gen_muli_i64(tmp, tmp, 6);
    do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
                   a->cf, a->d, false, is_i);
    return nullify_end(ctx);
}

static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, false);
}

static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, true);
}
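/*
 * Sketch of the decimal correction above, per the classic PA-RISC BCD
 * recipe: one operand of the preceding binary ADD is pre-biased by 6
 * in every digit, so nibbles that produced a carry are already
 * correct while nibbles that did not are 6 too large.  The extract2
 * reassembles the per-nibble carries from PSW[CB], the mask and
 * multiply turn each carry bit (inverted for DCOR) into a 0-or-6
 * digit, and the unit add/sub applies all corrections at once.
 */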
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_i64 dest, add1, add2, addc, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new_i64();
    add2 = tcg_temp_new_i64();
    addc = tcg_temp_new_i64();
    dest = tcg_temp_new_i64();

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_i64(add1, in1, in1);
    tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));

    /*
     * Add or subtract R2, depending on PSW[V].  Proper computation of
     * carry requires that we subtract via + ~R2 + 1, as described in
     * the manual.  By extracting and masking V, we can produce the
     * proper inputs to the addition without movcond.
     */
    tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
    tcg_gen_xor_i64(add2, in2, addc);
    tcg_gen_andi_i64(addc, addc, 1);

    tcg_gen_addcio_i64(dest, cpu_psw_cb_msb, add1, add2, addc);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
    tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);

    /*
     * Write back PSW[V] for the division step.
     * Shift cb{8} from where it lives in bit 32 to bit 31,
     * so that it overlaps r2{32} in bit 31.
     */
    tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
    tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_i64 sv = NULL, uv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
        } else if (cond_need_cb(a->cf >> 1)) {
            uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
        }
        ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
    }

    return nullify_end(ctx);
}

static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}

static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);

    return nullify_end(ctx);
}

static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
                          void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
                             void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 r, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r, a->i);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
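/*
 * The multimedia (MAX-2) operations dispatched through the wrappers
 * above and below operate on four independent 16-bit lanes, or two
 * 32-bit lanes, packed into a single 64-bit register.
 */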
static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
                                void (*fn)(TCGv_i64, TCGv_i64,
                                           TCGv_i64, TCGv_i32))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2, tcg_constant_i32(a->sh));
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
}

static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_ss);
}

static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_us);
}

static bool trans_havg(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_havg);
}

static bool trans_hshl(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
}

static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
}

static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
}

static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
}

static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
}

static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
}

static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_ss);
}

static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_us);
}

static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0xffff0000ffff0000ull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r2, mask);
    tcg_gen_andi_i64(dst, r1, mask);
    tcg_gen_shri_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}

static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_l);
}

static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    uint64_t mask = 0x0000ffff0000ffffull;
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_andi_i64(tmp, r1, mask);
    tcg_gen_andi_i64(dst, r2, mask);
    tcg_gen_shli_i64(tmp, tmp, 16);
    tcg_gen_or_i64(dst, dst, tmp);
}

static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_r);
}
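/*
 * Lane picture for the mixes, writing each register as four halfwords
 * h0:h1:h2:h3 (h0 most significant): mixh,l above produces
 * r1.h0:r2.h0:r1.h2:r2.h2 and mixh,r produces r1.h1:r2.h1:r1.h3:r2.h3.
 * The word forms below do the same with 32-bit halves.
 */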
static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_shri_i64(tmp, r2, 32);
    tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
}

static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_l);
}

static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
}

static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_r);
}

static bool trans_permh(DisasContext *ctx, arg_permh *a)
{
    TCGv_i64 r, t0, t1, t2, t3;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r1);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();

    tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
    tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
    tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
    tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);

    tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
    tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
    tcg_gen_deposit_i64(t0, t2, t0, 32, 32);

    save_gpr(ctx, a->t, t0);
    return nullify_end(ctx);
}

static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (ctx->is_pa20) {
        /*
         * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
         * Any base modification still occurs.
         */
        if (a->t == 0) {
            return trans_nop_addrx(ctx, a);
        }
    } else if (a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size | MO_TE);
}

static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
}
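/*
 * LDCW/LDCD are PA-RISC's atomic read-and-clear primitives, the usual
 * building block for spinlocks, which is why the implementation below
 * is an atomic exchange with zero rather than a plain load.
 */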
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_i64 dest, ofs;
    TCGv_i64 addr;

    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = tcg_temp_new_i64();
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
             a->disp, a->sp, a->m, MMU_DISABLED(ctx));

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        tcg_gen_andi_i64(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}

static bool trans_stdby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    if (!ctx->is_pa20) {
        return false;
    }
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        tcg_gen_andi_i64(ofs, ofs, ~7);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}

static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_i64(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    ctx->null_cond = cond_make_f();
    return true;
}

static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
    TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    ctx->null_cond = cond_make_f();
    return true;
}
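/*
 * Note that ADDIL always targets %r1: the left immediate typically
 * supplies the high part of an address, with a following load, store
 * or LDO providing the low displacement relative to %r1.
 */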
static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
    if (a->b == 0) {
        tcg_gen_movi_i64(tcg_rt, a->i);
    } else {
        tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    ctx->null_cond = cond_make_f();
    return true;
}

static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, bool d, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();

    tcg_gen_sub_i64(dest, in1, in2);

    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
                   a->c, a->f, a->d, a->n, a->disp);
}

static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
                   a->c, a->f, a->d, a->n, a->disp);
}

static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv, cb_cond;
    DisasCond cond;
    bool d = false;

    /*
     * For hppa64, the ADDB conditions change with PSW.W,
     * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
     */
    if (ctx->tb_flags & PSW_W) {
        d = c >= 5;
        if (d) {
            c &= 3;
        }
    }

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();
    sv = NULL;
    cb_cond = NULL;

    if (cond_need_cb(c)) {
        TCGv_i64 cb = tcg_temp_new_i64();
        TCGv_i64 cb_msb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        cb_cond = get_carry(ctx, d, cb, cb_msb);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
    }

    cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
}

static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    if (a->d) {
        tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
    } else {
        /* Force shift into [32,63] */
        tcg_gen_ori_i64(tmp, cpu_sar, 32);
        tcg_gen_shl_i64(tmp, tcg_r, tmp);
    }

    cond = cond_make_ti(a->c ? TCG_COND_GE : TCG_COND_LT, tmp, 0);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
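/*
 * BB tests a single bit selected with PA's big-endian numbering, in
 * which bit 0 is the most significant.  Shifting the selected bit
 * into the sign position (via SAR above) or masking it with the TST
 * conditions (below) lets the branch condition read it directly.
 */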
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    DisasCond cond;
    int p = a->p | (a->d ? 0 : 32);

    nullify_over(ctx);
    cond = cond_make_vi(a->c ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                        load_gpr(ctx, a->r), 1ull << (63 - p));
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_i64(dest, 0);
    } else {
        tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
    }

    /* All MOVB conditions are 32-bit.  */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_i64(dest, a->i);

    /* All MOVBI conditions are 32-bit.  */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
{
    TCGv_i64 dest, src2;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        if (a->d) {
            tcg_gen_shr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_ext32u_i64(dest, src2);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            tcg_gen_shr_i64(dest, dest, tmp);
        }
    } else if (a->r1 == a->r2) {
        if (a->d) {
            tcg_gen_rotr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i32 t32 = tcg_temp_new_i32();
            TCGv_i32 s32 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t32, src2);
            tcg_gen_extrl_i64_i32(s32, cpu_sar);
            tcg_gen_andi_i32(s32, s32, 31);
            tcg_gen_rotr_i32(t32, t32, s32);
            tcg_gen_extu_i32_i64(dest, t32);
        }
    } else {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);

        if (a->d) {
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 n = tcg_temp_new_i64();

            tcg_gen_xori_i64(n, cpu_sar, 63);
            tcg_gen_shl_i64(t, src1, n);
            tcg_gen_shli_i64(t, t, 1);
            tcg_gen_shr_i64(dest, src2, cpu_sar);
            tcg_gen_or_i64(dest, dest, t);
        } else {
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 s = tcg_temp_new_i64();

            tcg_gen_concat32_i64(t, src2, src1);
            tcg_gen_andi_i64(s, cpu_sar, 31);
            tcg_gen_shr_i64(dest, t, s);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
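/*
 * SHRP is the double-width funnel shift: conceptually r1:r2 shifted
 * right by the shift amount, keeping the low word.  The special cases
 * above fall out of that definition: r1 == 0 degenerates to a plain
 * logical shift of r2, and r1 == r2 to a rotate.
 */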
static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
{
    unsigned width, sa;
    TCGv_i64 dest, t2;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    width = a->d ? 64 : 32;
    sa = width - 1 - a->cpos;

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_extract_i64(dest, t2, sa, width - sa);
    } else if (width == TARGET_LONG_BITS) {
        tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
    } else {
        assert(!a->d);
        if (a->r1 == a->r2) {
            TCGv_i32 t32 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t32, t2);
            tcg_gen_rotri_i32(t32, t32, sa);
            tcg_gen_extu_i32_i64(dest, t32);
        } else {
            tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
            tcg_gen_extract_i64(dest, dest, sa, 32);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
{
    unsigned widthm1 = a->d ? 63 : 31;
    TCGv_i64 dest, src, tmp;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
    tcg_gen_xori_i64(tmp, tmp, widthm1);

    if (a->se) {
        if (!a->d) {
            tcg_gen_ext32s_i64(dest, src);
            src = dest;
        }
        tcg_gen_sar_i64(dest, src, tmp);
        tcg_gen_sextract_i64(dest, dest, 0, a->len);
    } else {
        if (!a->d) {
            tcg_gen_ext32u_i64(dest, src);
            src = dest;
        }
        tcg_gen_shr_i64(dest, src, tmp);
        tcg_gen_extract_i64(dest, dest, 0, a->len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
{
    unsigned len, cpos, width;
    TCGv_i64 dest, src;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    cpos = width - 1 - a->pos;
    if (cpos + len > width) {
        len = width - cpos;
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_i64(dest, src, cpos, len);
    } else {
        tcg_gen_extract_i64(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
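/*
 * Note the big-endian bit numbering again: EXTR's position operand
 * names the rightmost bit of the field, so cpos = width - 1 - pos
 * converts it to the little-endian shift count used by TCG.  For
 * example, EXTRW,U r,31,8 extracts the low byte (cpos = 0) and
 * EXTRW,U r,7,8 the high byte (cpos = 24).
 */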
static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
{
    unsigned len, width;
    uint64_t mask0, mask1;
    TCGv_i64 dest;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_i64 src = load_gpr(ctx, a->t);
        tcg_gen_andi_i64(dest, src, mask1);
        tcg_gen_ori_i64(dest, dest, mask0);
    } else {
        tcg_gen_movi_i64(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len, width;
    TCGv_i64 dest, val;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
                       bool d, bool nz, unsigned len, TCGv_i64 val)
{
    unsigned rs = nz ? rt : 0;
    unsigned widthm1 = d ? 63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);

    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    return nullify_end(ctx);
}

static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}

static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}

static bool trans_be(DisasContext *ctx, arg_be *a)
{
#ifndef CONFIG_USER_ONLY
    ctx->iaq_j.space = tcg_temp_new_i64();
    load_spr(ctx, ctx->iaq_j.space, a->sp);
#endif

    ctx->iaq_j.base = tcg_temp_new_i64();
    ctx->iaq_j.disp = 0;

    tcg_gen_addi_i64(ctx->iaq_j.base, load_gpr(ctx, a->b), a->disp);
    ctx->iaq_j.base = do_ibranch_priv(ctx, ctx->iaq_j.base);

    return do_ibranch(ctx, a->l, true, a->n);
}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, a->disp, a->l, a->n);
}
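/*
 * B,GATE is PA-RISC's controlled privilege-promotion branch: the
 * target must lie on a gateway page, and the new privilege level
 * comes from that page's translation (compare do_page_zero, which
 * emulates the Linux gateway page for user-only mode).
 */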
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    int64_t disp = a->disp;
    bool indirect = false;

    /* Trap if PSW[B] is set.  */
    if (ctx->psw_xb & PSW_B) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

#ifndef CONFIG_USER_ONLY
    if (ctx->privilege == 0) {
        /* Privilege cannot decrease.  */
    } else if (!(ctx->tb_flags & PSW_C)) {
        /* With paging disabled, priv becomes 0.  */
        disp -= ctx->privilege;
    } else {
        /* Adjust the dest offset for the privilege change from the PTE.  */
        TCGv_i64 off = tcg_temp_new_i64();

        copy_iaoq_entry(ctx, off, &ctx->iaq_f);
        gen_helper_b_gate_priv(off, tcg_env, off);

        ctx->iaq_j.base = off;
        ctx->iaq_j.disp = disp + 8;
        indirect = true;
    }
#endif

    if (a->l) {
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    if (indirect) {
        return do_ibranch(ctx, 0, false, a->n);
    }
    return do_dbranch(ctx, disp, 0, a->n);
}

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        DisasIAQE next = iaqe_incr(&ctx->iaq_f, 8);
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        /* The computation here never changes privilege level.  */
        copy_iaoq_entry(ctx, t0, &next);
        tcg_gen_shli_i64(t1, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(t0, t0, t1);

        ctx->iaq_j = iaqe_next_absv(ctx, t0);
        return do_ibranch(ctx, a->l, false, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, 0, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    ctx->iaq_j = iaqe_next_absv(ctx, dest);

    return do_ibranch(ctx, 0, false, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 b = load_gpr(ctx, a->b);

#ifndef CONFIG_USER_ONLY
    ctx->iaq_j.space = space_select(ctx, 0, b);
#endif
    ctx->iaq_j.base = do_ibranch_priv(ctx, b);
    ctx->iaq_j.disp = 0;

    return do_ibranch(ctx, a->l, false, a->n);
}
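/*
 * Note the shift-left-by-3 in BLR and BV above: the architected index
 * scaling for these vectored branches is 8 bytes, i.e. two
 * instruction words.
 */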
static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions are implemented as nops.  */
    return ctx->is_pa20;
}

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (ctx->is_pa20) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}
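/*
 * Naming guide for the class 1 conversions below: f/d are single and
 * double float, w/q (helper suffixes w/dw) are signed 32/64-bit
 * integers, uw/uq their unsigned counterparts, and the _t_ forms are
 * the round-toward-zero (truncating) variants.
 */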

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */
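
/*
 * FCMP records its result in the FPSR C bit or one of the CA
 * auxiliary bits, as selected by the y field; FTEST then inspects
 * those bits via the fr0 shadow copy and nullifies the following
 * insn when the selected condition holds.
 */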

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGCond tc = TCG_COND_TSTNE;
    uint32_t mask;
    TCGv_i64 t;

    nullify_over(ctx);

    t = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        switch (a->c) {
        case 0: /* simple */
            mask = R_FPSR_C_MASK;
            break;
        case 2: /* rej */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 1: /* acc */
            mask = R_FPSR_C_MASK | R_FPSR_CQ_MASK;
            break;
        case 6: /* rej8 */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 5: /* acc8 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK;
            break;
        case 9: /* acc6 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK;
            break;
        case 13: /* acc4 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK;
            break;
        case 17: /* acc2 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_MASK;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;
        mask = R_FPSR_CA0_MASK >> cbit;
    }

    ctx->null_cond = cond_make_ti(tc, t, mask);
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
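
/*
 * XMPYU is an unsigned 32x32->64 multiply performed in the FPU.
 * Both operands load zero-extended via load_frw0_i64, so the
 * 64-bit product is exact, e.g.
 *   0xffffffff * 0xffffffff = 0xfffffffe00000001
 * and is stored whole into the double-precision destination.
 */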

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/*
 * Convert the fmpyadd single-precision register encodings to standard.
 * The 5-bit field addresses the 32 single-word halves of fr16-fr31:
 * bits 3:0 select the register and bit 4 the right (low) half, so
 * e.g. 3 -> 19 (fr19L) and 19 -> 51 (fr19R) in this file's numbering.
 */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

/* Emulate PDC BTLB, called by SeaBIOS-hppa */
static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_btlb(tcg_env);
    return nullify_end(ctx);
#endif
}

/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_console_output(tcg_env);
    return nullify_end(ctx);
#endif
}

static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_putshadowregs(ctx);
}
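
/*
 * MFDIAG and MTDIAG move data between general registers and the
 * model-dependent diagnose registers, kept in env->dr[].  On PA2.0,
 * %dr2 determines the gva offset mask, so writing it must exit the
 * TB to refresh ctx->gva_offset_mask for subsequent translation.
 */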

static bool trans_diag_mfdiag(DisasContext *ctx, arg_diag_mfdiag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    TCGv_i64 dest = dest_gpr(ctx, a->rt);
    tcg_gen_ld_i64(dest, tcg_env,
                   offsetof(CPUHPPAState, dr[a->dr]));
    save_gpr(ctx, a->rt, dest);
    return nullify_end(ctx);
}

static bool trans_diag_mtdiag(DisasContext *ctx, arg_diag_mtdiag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_st_i64(load_gpr(ctx, a->r1), tcg_env,
                   offsetof(CPUHPPAState, dr[a->dr]));
#ifndef CONFIG_USER_ONLY
    if (ctx->is_pa20 && (a->dr == 2)) {
        /* Update gva_offset_mask from the new value of %dr2 */
        gen_helper_update_gva_offset_mask(tcg_env);
        /* Exit to capture the new value for the next TB. */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    }
#endif
    return nullify_end(ctx);
}

static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t cs_base;
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
    ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);
    ctx->gva_offset_mask = cpu_env(cs)->gva_offset_mask;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = PRIV_USER;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
#endif

    cs_base = ctx->base.tb->cs_base;
    ctx->iaoq_first = ctx->base.pc_first + ctx->privilege;

    if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
        ctx->iaq_b.space = cpu_iasq_b;
        ctx->iaq_b.base = cpu_iaoq_b;
    } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
        ctx->iaq_b.base = cpu_iaoq_b;
    } else {
        uint64_t iaoq_f_pgofs = ctx->iaoq_first & ~TARGET_PAGE_MASK;
        uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;
        ctx->iaq_b.disp = iaoq_b_pgofs - iaoq_f_pgofs;
    }

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
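
/*
 * The remaining hooks are invoked by the common translator loop:
 * tb_start once per TB, then insn_start and translate_insn for
 * each instruction, and finally tb_stop.
 */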

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t iaoq_f, iaoq_b;
    int64_t diff;

    tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));

    iaoq_f = ctx->iaoq_first + ctx->iaq_f.disp;
    if (iaqe_variable(&ctx->iaq_b)) {
        diff = INT32_MIN;
    } else {
        iaoq_b = ctx->iaoq_first + ctx->iaq_b.disp;
        diff = iaoq_b - iaoq_f;
        /* Direct branches can only produce a 24-bit displacement. */
        tcg_debug_assert(diff == (int32_t)diff);
        tcg_debug_assert(diff != INT32_MIN);
    }

    tcg_gen_insn_start(iaoq_f & ~TARGET_PAGE_MASK, diff, 0);
    ctx->insn_start_updated = false;
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /*
         * Set up the IA queue for the next insn.
         * This will be overwritten by a branch.
         */
        ctx->iaq_n = NULL;
        memset(&ctx->iaq_j, 0, sizeof(ctx->iaq_j));
        ctx->psw_b_next = false;

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }

        if (ret != DISAS_NORETURN) {
            set_psw_xb(ctx, ctx->psw_b_next ? PSW_B : 0);
        }
    }

    /* If the TranslationBlock must end, do so. */
    ctx->base.pc_next += 4;
    if (ret != DISAS_NEXT) {
        return;
    }
    /* Note this also detects a priority change. */
    if (iaqe_variable(&ctx->iaq_b)
        || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        return;
    }

    /*
     * Advance the insn queue.
     * The only exit now is DISAS_TOO_MANY from the translator loop.
     */
    ctx->iaq_f.disp = ctx->iaq_b.disp;
    if (!ctx->iaq_n) {
        ctx->iaq_b.disp += 4;
        return;
    }
    /*
     * If IAQ_Next is variable in any way, we need to copy into the
     * IAQ_Back globals, in case the next insn raises an exception.
     */
    if (ctx->iaq_n->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaq_n);
        ctx->iaq_b.base = cpu_iaoq_b;
        ctx->iaq_b.disp = 0;
    } else {
        ctx->iaq_b.disp = ctx->iaq_n->disp;
    }
    if (ctx->iaq_n->space) {
        tcg_gen_mov_i64(cpu_iasq_b, ctx->iaq_n->space);
        ctx->iaq_b.space = cpu_iasq_b;
    }
}
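
/*
 * Out-of-line exception paths queued on delay_excp_list during
 * translation are emitted at the end of the TB: each label
 * re-establishes PSW[N], optionally the IIR, and the IA queue of
 * the parent insn before raising the exception.
 */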

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;
    /* Assume the insn queue has not been advanced. */
    DisasIAQE *f = &ctx->iaq_b;
    DisasIAQE *b = ctx->iaq_n;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        /* The insn queue has advanced, unwind. */
        f = &ctx->iaq_f;
        b = &ctx->iaq_b;
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE:
        if (use_goto_tb(ctx, f, b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, f, b);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE_EXIT:
        install_iaq_entries(ctx, f, b);
        nullify_save(ctx);
        if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }

    for (DisasDelayException *e = ctx->delay_excp_list; e ; e = e->next) {
        gen_set_label(e->lab);
        if (e->set_n >= 0) {
            tcg_gen_movi_i64(cpu_psw_n, e->set_n);
        }
        if (e->set_iir) {
            tcg_gen_st_i64(tcg_constant_i64(e->insn), tcg_env,
                           offsetof(CPUHPPAState, cr[CR_IIR]));
        }
        install_iaq_entries(ctx, &e->iaq_f, &e->iaq_b);
        gen_excp_1(e->excp);
    }
}

#ifdef CONFIG_USER_ONLY
static bool hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000:  (null)\n");
        return true;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
        return true;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
        return true;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100:  syscall\n");
        return true;
    }
    return false;
}
#endif

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
#ifdef CONFIG_USER_ONLY
    .disas_log          = hppa_tr_disas_log,
#endif
};

void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
                         int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx = { };
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}