/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/helper-info.h"
#include "tcg/tcg-ldst.h"
#include "disas/dis-asm.h"
#include "tcg-has.h"
#include <ffi.h>


/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

/*
 * Per-thread record of the current bytecode position.  It is stored
 * before each helper call so that helper functions can see a
 * "return address" (see the INDEX_op_call handling in the interpreter).
 */
__thread uintptr_t tci_tb_ptr;

/*
 * Store a 64-bit value into a (high, low) pair of 32-bit host registers.
 * Used when TCG_TARGET_REG_BITS == 32 and for double-word results.
 */
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */

/* Decode a 20-bit signed displacement relative to tb_ptr; 0 means NULL. */
static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 16, 16);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

/* Two registers plus two 6-bit bit-position immediates (extract ops). */
static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

/* Three registers plus two 6-bit immediates (deposit ops). */
static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

/* Evaluate a TCGCond comparison on two 32-bit values. */
static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

/* Evaluate a TCGCond comparison on two 64-bit values. */
static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

/*
 * Guest memory load, dispatching on the (sign-extending) size encoded
 * in the MemOpIdx to the corresponding softmmu helper.
 */
static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}

/*
 * Guest memory store, dispatching on the size encoded in the MemOpIdx
 * to the corresponding softmmu helper.
 */
static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Case labels for opcodes that exist in both _i32 and _i64 variants. */
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
360 * Therefore, disable CFI checks in the interpreter function 361 */ 362 uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, 363 const void *v_tb_ptr) 364 { 365 const uint32_t *tb_ptr = v_tb_ptr; 366 tcg_target_ulong regs[TCG_TARGET_NB_REGS]; 367 uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE) 368 / sizeof(uint64_t)]; 369 370 regs[TCG_AREG0] = (tcg_target_ulong)env; 371 regs[TCG_REG_CALL_STACK] = (uintptr_t)stack; 372 tci_assert(tb_ptr); 373 374 for (;;) { 375 uint32_t insn; 376 TCGOpcode opc; 377 TCGReg r0, r1, r2, r3, r4, r5; 378 tcg_target_ulong t1; 379 TCGCond condition; 380 uint8_t pos, len; 381 uint32_t tmp32; 382 uint64_t tmp64, taddr; 383 uint64_t T1, T2; 384 MemOpIdx oi; 385 int32_t ofs; 386 void *ptr; 387 388 insn = *tb_ptr++; 389 opc = extract32(insn, 0, 8); 390 391 switch (opc) { 392 case INDEX_op_call: 393 { 394 void *call_slots[MAX_CALL_IARGS]; 395 ffi_cif *cif; 396 void *func; 397 unsigned i, s, n; 398 399 tci_args_nl(insn, tb_ptr, &len, &ptr); 400 func = ((void **)ptr)[0]; 401 cif = ((void **)ptr)[1]; 402 403 n = cif->nargs; 404 for (i = s = 0; i < n; ++i) { 405 ffi_type *t = cif->arg_types[i]; 406 call_slots[i] = &stack[s]; 407 s += DIV_ROUND_UP(t->size, 8); 408 } 409 410 /* Helper functions may need to access the "return address" */ 411 tci_tb_ptr = (uintptr_t)tb_ptr; 412 ffi_call(cif, func, stack, call_slots); 413 } 414 415 switch (len) { 416 case 0: /* void */ 417 break; 418 case 1: /* uint32_t */ 419 /* 420 * The result winds up "left-aligned" in the stack[0] slot. 421 * Note that libffi has an odd special case in that it will 422 * always widen an integral result to ffi_arg. 423 */ 424 if (sizeof(ffi_arg) == 8) { 425 regs[TCG_REG_R0] = (uint32_t)stack[0]; 426 } else { 427 regs[TCG_REG_R0] = *(uint32_t *)stack; 428 } 429 break; 430 case 2: /* uint64_t */ 431 /* 432 * For TCG_TARGET_REG_BITS == 32, the register pair 433 * must stay in host memory order. 
434 */ 435 memcpy(®s[TCG_REG_R0], stack, 8); 436 break; 437 case 3: /* Int128 */ 438 memcpy(®s[TCG_REG_R0], stack, 16); 439 break; 440 default: 441 g_assert_not_reached(); 442 } 443 break; 444 445 case INDEX_op_br: 446 tci_args_l(insn, tb_ptr, &ptr); 447 tb_ptr = ptr; 448 continue; 449 case INDEX_op_setcond_i32: 450 tci_args_rrrc(insn, &r0, &r1, &r2, &condition); 451 regs[r0] = tci_compare32(regs[r1], regs[r2], condition); 452 break; 453 case INDEX_op_movcond_i32: 454 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 455 tmp32 = tci_compare32(regs[r1], regs[r2], condition); 456 regs[r0] = regs[tmp32 ? r3 : r4]; 457 break; 458 #if TCG_TARGET_REG_BITS == 32 459 case INDEX_op_setcond2_i32: 460 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 461 T1 = tci_uint64(regs[r2], regs[r1]); 462 T2 = tci_uint64(regs[r4], regs[r3]); 463 regs[r0] = tci_compare64(T1, T2, condition); 464 break; 465 #elif TCG_TARGET_REG_BITS == 64 466 case INDEX_op_setcond_i64: 467 tci_args_rrrc(insn, &r0, &r1, &r2, &condition); 468 regs[r0] = tci_compare64(regs[r1], regs[r2], condition); 469 break; 470 case INDEX_op_movcond_i64: 471 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 472 tmp32 = tci_compare64(regs[r1], regs[r2], condition); 473 regs[r0] = regs[tmp32 ? r3 : r4]; 474 break; 475 #endif 476 CASE_32_64(mov) 477 tci_args_rr(insn, &r0, &r1); 478 regs[r0] = regs[r1]; 479 break; 480 case INDEX_op_tci_movi: 481 tci_args_ri(insn, &r0, &t1); 482 regs[r0] = t1; 483 break; 484 case INDEX_op_tci_movl: 485 tci_args_rl(insn, tb_ptr, &r0, &ptr); 486 regs[r0] = *(tcg_target_ulong *)ptr; 487 break; 488 489 /* Load/store operations (32 bit). 
*/ 490 491 CASE_32_64(ld8u) 492 tci_args_rrs(insn, &r0, &r1, &ofs); 493 ptr = (void *)(regs[r1] + ofs); 494 regs[r0] = *(uint8_t *)ptr; 495 break; 496 CASE_32_64(ld8s) 497 tci_args_rrs(insn, &r0, &r1, &ofs); 498 ptr = (void *)(regs[r1] + ofs); 499 regs[r0] = *(int8_t *)ptr; 500 break; 501 CASE_32_64(ld16u) 502 tci_args_rrs(insn, &r0, &r1, &ofs); 503 ptr = (void *)(regs[r1] + ofs); 504 regs[r0] = *(uint16_t *)ptr; 505 break; 506 CASE_32_64(ld16s) 507 tci_args_rrs(insn, &r0, &r1, &ofs); 508 ptr = (void *)(regs[r1] + ofs); 509 regs[r0] = *(int16_t *)ptr; 510 break; 511 case INDEX_op_ld_i32: 512 CASE_64(ld32u) 513 tci_args_rrs(insn, &r0, &r1, &ofs); 514 ptr = (void *)(regs[r1] + ofs); 515 regs[r0] = *(uint32_t *)ptr; 516 break; 517 CASE_32_64(st8) 518 tci_args_rrs(insn, &r0, &r1, &ofs); 519 ptr = (void *)(regs[r1] + ofs); 520 *(uint8_t *)ptr = regs[r0]; 521 break; 522 CASE_32_64(st16) 523 tci_args_rrs(insn, &r0, &r1, &ofs); 524 ptr = (void *)(regs[r1] + ofs); 525 *(uint16_t *)ptr = regs[r0]; 526 break; 527 case INDEX_op_st_i32: 528 CASE_64(st32) 529 tci_args_rrs(insn, &r0, &r1, &ofs); 530 ptr = (void *)(regs[r1] + ofs); 531 *(uint32_t *)ptr = regs[r0]; 532 break; 533 534 /* Arithmetic operations (mixed 32/64 bit). 
*/ 535 536 CASE_32_64(add) 537 tci_args_rrr(insn, &r0, &r1, &r2); 538 regs[r0] = regs[r1] + regs[r2]; 539 break; 540 CASE_32_64(sub) 541 tci_args_rrr(insn, &r0, &r1, &r2); 542 regs[r0] = regs[r1] - regs[r2]; 543 break; 544 CASE_32_64(mul) 545 tci_args_rrr(insn, &r0, &r1, &r2); 546 regs[r0] = regs[r1] * regs[r2]; 547 break; 548 CASE_32_64(and) 549 tci_args_rrr(insn, &r0, &r1, &r2); 550 regs[r0] = regs[r1] & regs[r2]; 551 break; 552 CASE_32_64(or) 553 tci_args_rrr(insn, &r0, &r1, &r2); 554 regs[r0] = regs[r1] | regs[r2]; 555 break; 556 CASE_32_64(xor) 557 tci_args_rrr(insn, &r0, &r1, &r2); 558 regs[r0] = regs[r1] ^ regs[r2]; 559 break; 560 #if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64 561 CASE_32_64(andc) 562 tci_args_rrr(insn, &r0, &r1, &r2); 563 regs[r0] = regs[r1] & ~regs[r2]; 564 break; 565 #endif 566 #if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64 567 CASE_32_64(orc) 568 tci_args_rrr(insn, &r0, &r1, &r2); 569 regs[r0] = regs[r1] | ~regs[r2]; 570 break; 571 #endif 572 #if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64 573 CASE_32_64(eqv) 574 tci_args_rrr(insn, &r0, &r1, &r2); 575 regs[r0] = ~(regs[r1] ^ regs[r2]); 576 break; 577 #endif 578 #if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64 579 CASE_32_64(nand) 580 tci_args_rrr(insn, &r0, &r1, &r2); 581 regs[r0] = ~(regs[r1] & regs[r2]); 582 break; 583 #endif 584 #if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64 585 CASE_32_64(nor) 586 tci_args_rrr(insn, &r0, &r1, &r2); 587 regs[r0] = ~(regs[r1] | regs[r2]); 588 break; 589 #endif 590 591 /* Arithmetic operations (32 bit). 
*/ 592 593 case INDEX_op_div_i32: 594 tci_args_rrr(insn, &r0, &r1, &r2); 595 regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2]; 596 break; 597 case INDEX_op_divu_i32: 598 tci_args_rrr(insn, &r0, &r1, &r2); 599 regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2]; 600 break; 601 case INDEX_op_rem_i32: 602 tci_args_rrr(insn, &r0, &r1, &r2); 603 regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2]; 604 break; 605 case INDEX_op_remu_i32: 606 tci_args_rrr(insn, &r0, &r1, &r2); 607 regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2]; 608 break; 609 #if TCG_TARGET_HAS_clz_i32 610 case INDEX_op_clz_i32: 611 tci_args_rrr(insn, &r0, &r1, &r2); 612 tmp32 = regs[r1]; 613 regs[r0] = tmp32 ? clz32(tmp32) : regs[r2]; 614 break; 615 #endif 616 #if TCG_TARGET_HAS_ctz_i32 617 case INDEX_op_ctz_i32: 618 tci_args_rrr(insn, &r0, &r1, &r2); 619 tmp32 = regs[r1]; 620 regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2]; 621 break; 622 #endif 623 #if TCG_TARGET_HAS_ctpop_i32 624 case INDEX_op_ctpop_i32: 625 tci_args_rr(insn, &r0, &r1); 626 regs[r0] = ctpop32(regs[r1]); 627 break; 628 #endif 629 630 /* Shift/rotate operations (32 bit). 
*/ 631 632 case INDEX_op_shl_i32: 633 tci_args_rrr(insn, &r0, &r1, &r2); 634 regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31); 635 break; 636 case INDEX_op_shr_i32: 637 tci_args_rrr(insn, &r0, &r1, &r2); 638 regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31); 639 break; 640 case INDEX_op_sar_i32: 641 tci_args_rrr(insn, &r0, &r1, &r2); 642 regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31); 643 break; 644 #if TCG_TARGET_HAS_rot_i32 645 case INDEX_op_rotl_i32: 646 tci_args_rrr(insn, &r0, &r1, &r2); 647 regs[r0] = rol32(regs[r1], regs[r2] & 31); 648 break; 649 case INDEX_op_rotr_i32: 650 tci_args_rrr(insn, &r0, &r1, &r2); 651 regs[r0] = ror32(regs[r1], regs[r2] & 31); 652 break; 653 #endif 654 case INDEX_op_deposit_i32: 655 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 656 regs[r0] = deposit32(regs[r1], pos, len, regs[r2]); 657 break; 658 case INDEX_op_extract_i32: 659 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 660 regs[r0] = extract32(regs[r1], pos, len); 661 break; 662 case INDEX_op_sextract_i32: 663 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 664 regs[r0] = sextract32(regs[r1], pos, len); 665 break; 666 case INDEX_op_brcond_i32: 667 tci_args_rl(insn, tb_ptr, &r0, &ptr); 668 if ((uint32_t)regs[r0]) { 669 tb_ptr = ptr; 670 } 671 break; 672 #if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32 673 case INDEX_op_add2_i32: 674 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 675 T1 = tci_uint64(regs[r3], regs[r2]); 676 T2 = tci_uint64(regs[r5], regs[r4]); 677 tci_write_reg64(regs, r1, r0, T1 + T2); 678 break; 679 #endif 680 #if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32 681 case INDEX_op_sub2_i32: 682 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 683 T1 = tci_uint64(regs[r3], regs[r2]); 684 T2 = tci_uint64(regs[r5], regs[r4]); 685 tci_write_reg64(regs, r1, r0, T1 - T2); 686 break; 687 #endif 688 #if TCG_TARGET_HAS_mulu2_i32 689 case INDEX_op_mulu2_i32: 690 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 691 tmp64 = (uint64_t)(uint32_t)regs[r2] * 
(uint32_t)regs[r3]; 692 tci_write_reg64(regs, r1, r0, tmp64); 693 break; 694 #endif 695 #if TCG_TARGET_HAS_muls2_i32 696 case INDEX_op_muls2_i32: 697 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 698 tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3]; 699 tci_write_reg64(regs, r1, r0, tmp64); 700 break; 701 #endif 702 #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 703 CASE_32_64(ext8s) 704 tci_args_rr(insn, &r0, &r1); 705 regs[r0] = (int8_t)regs[r1]; 706 break; 707 #endif 708 #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \ 709 TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 710 CASE_32_64(ext16s) 711 tci_args_rr(insn, &r0, &r1); 712 regs[r0] = (int16_t)regs[r1]; 713 break; 714 #endif 715 #if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64 716 CASE_32_64(ext8u) 717 tci_args_rr(insn, &r0, &r1); 718 regs[r0] = (uint8_t)regs[r1]; 719 break; 720 #endif 721 #if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64 722 CASE_32_64(ext16u) 723 tci_args_rr(insn, &r0, &r1); 724 regs[r0] = (uint16_t)regs[r1]; 725 break; 726 #endif 727 #if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 728 CASE_32_64(bswap16) 729 tci_args_rr(insn, &r0, &r1); 730 regs[r0] = bswap16(regs[r1]); 731 break; 732 #endif 733 #if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64 734 CASE_32_64(bswap32) 735 tci_args_rr(insn, &r0, &r1); 736 regs[r0] = bswap32(regs[r1]); 737 break; 738 #endif 739 #if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64 740 CASE_32_64(not) 741 tci_args_rr(insn, &r0, &r1); 742 regs[r0] = ~regs[r1]; 743 break; 744 #endif 745 CASE_32_64(neg) 746 tci_args_rr(insn, &r0, &r1); 747 regs[r0] = -regs[r1]; 748 break; 749 #if TCG_TARGET_REG_BITS == 64 750 /* Load/store operations (64 bit). 
*/ 751 752 case INDEX_op_ld32s_i64: 753 tci_args_rrs(insn, &r0, &r1, &ofs); 754 ptr = (void *)(regs[r1] + ofs); 755 regs[r0] = *(int32_t *)ptr; 756 break; 757 case INDEX_op_ld_i64: 758 tci_args_rrs(insn, &r0, &r1, &ofs); 759 ptr = (void *)(regs[r1] + ofs); 760 regs[r0] = *(uint64_t *)ptr; 761 break; 762 case INDEX_op_st_i64: 763 tci_args_rrs(insn, &r0, &r1, &ofs); 764 ptr = (void *)(regs[r1] + ofs); 765 *(uint64_t *)ptr = regs[r0]; 766 break; 767 768 /* Arithmetic operations (64 bit). */ 769 770 case INDEX_op_div_i64: 771 tci_args_rrr(insn, &r0, &r1, &r2); 772 regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2]; 773 break; 774 case INDEX_op_divu_i64: 775 tci_args_rrr(insn, &r0, &r1, &r2); 776 regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2]; 777 break; 778 case INDEX_op_rem_i64: 779 tci_args_rrr(insn, &r0, &r1, &r2); 780 regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2]; 781 break; 782 case INDEX_op_remu_i64: 783 tci_args_rrr(insn, &r0, &r1, &r2); 784 regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2]; 785 break; 786 #if TCG_TARGET_HAS_clz_i64 787 case INDEX_op_clz_i64: 788 tci_args_rrr(insn, &r0, &r1, &r2); 789 regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2]; 790 break; 791 #endif 792 #if TCG_TARGET_HAS_ctz_i64 793 case INDEX_op_ctz_i64: 794 tci_args_rrr(insn, &r0, &r1, &r2); 795 regs[r0] = regs[r1] ? 
ctz64(regs[r1]) : regs[r2]; 796 break; 797 #endif 798 #if TCG_TARGET_HAS_ctpop_i64 799 case INDEX_op_ctpop_i64: 800 tci_args_rr(insn, &r0, &r1); 801 regs[r0] = ctpop64(regs[r1]); 802 break; 803 #endif 804 #if TCG_TARGET_HAS_mulu2_i64 805 case INDEX_op_mulu2_i64: 806 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 807 mulu64(®s[r0], ®s[r1], regs[r2], regs[r3]); 808 break; 809 #endif 810 #if TCG_TARGET_HAS_muls2_i64 811 case INDEX_op_muls2_i64: 812 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 813 muls64(®s[r0], ®s[r1], regs[r2], regs[r3]); 814 break; 815 #endif 816 #if TCG_TARGET_HAS_add2_i64 817 case INDEX_op_add2_i64: 818 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 819 T1 = regs[r2] + regs[r4]; 820 T2 = regs[r3] + regs[r5] + (T1 < regs[r2]); 821 regs[r0] = T1; 822 regs[r1] = T2; 823 break; 824 #endif 825 #if TCG_TARGET_HAS_add2_i64 826 case INDEX_op_sub2_i64: 827 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 828 T1 = regs[r2] - regs[r4]; 829 T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]); 830 regs[r0] = T1; 831 regs[r1] = T2; 832 break; 833 #endif 834 835 /* Shift/rotate operations (64 bit). 
*/ 836 837 case INDEX_op_shl_i64: 838 tci_args_rrr(insn, &r0, &r1, &r2); 839 regs[r0] = regs[r1] << (regs[r2] & 63); 840 break; 841 case INDEX_op_shr_i64: 842 tci_args_rrr(insn, &r0, &r1, &r2); 843 regs[r0] = regs[r1] >> (regs[r2] & 63); 844 break; 845 case INDEX_op_sar_i64: 846 tci_args_rrr(insn, &r0, &r1, &r2); 847 regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63); 848 break; 849 #if TCG_TARGET_HAS_rot_i64 850 case INDEX_op_rotl_i64: 851 tci_args_rrr(insn, &r0, &r1, &r2); 852 regs[r0] = rol64(regs[r1], regs[r2] & 63); 853 break; 854 case INDEX_op_rotr_i64: 855 tci_args_rrr(insn, &r0, &r1, &r2); 856 regs[r0] = ror64(regs[r1], regs[r2] & 63); 857 break; 858 #endif 859 case INDEX_op_deposit_i64: 860 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 861 regs[r0] = deposit64(regs[r1], pos, len, regs[r2]); 862 break; 863 case INDEX_op_extract_i64: 864 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 865 regs[r0] = extract64(regs[r1], pos, len); 866 break; 867 case INDEX_op_sextract_i64: 868 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 869 regs[r0] = sextract64(regs[r1], pos, len); 870 break; 871 case INDEX_op_brcond_i64: 872 tci_args_rl(insn, tb_ptr, &r0, &ptr); 873 if (regs[r0]) { 874 tb_ptr = ptr; 875 } 876 break; 877 case INDEX_op_ext32s_i64: 878 case INDEX_op_ext_i32_i64: 879 tci_args_rr(insn, &r0, &r1); 880 regs[r0] = (int32_t)regs[r1]; 881 break; 882 case INDEX_op_ext32u_i64: 883 case INDEX_op_extu_i32_i64: 884 tci_args_rr(insn, &r0, &r1); 885 regs[r0] = (uint32_t)regs[r1]; 886 break; 887 #if TCG_TARGET_HAS_bswap64_i64 888 case INDEX_op_bswap64_i64: 889 tci_args_rr(insn, &r0, &r1); 890 regs[r0] = bswap64(regs[r1]); 891 break; 892 #endif 893 #endif /* TCG_TARGET_REG_BITS == 64 */ 894 895 /* QEMU specific operations. 
*/ 896 897 case INDEX_op_exit_tb: 898 tci_args_l(insn, tb_ptr, &ptr); 899 return (uintptr_t)ptr; 900 901 case INDEX_op_goto_tb: 902 tci_args_l(insn, tb_ptr, &ptr); 903 tb_ptr = *(void **)ptr; 904 break; 905 906 case INDEX_op_goto_ptr: 907 tci_args_r(insn, &r0); 908 ptr = (void *)regs[r0]; 909 if (!ptr) { 910 return 0; 911 } 912 tb_ptr = ptr; 913 break; 914 915 case INDEX_op_qemu_ld_a32_i32: 916 tci_args_rrm(insn, &r0, &r1, &oi); 917 taddr = (uint32_t)regs[r1]; 918 goto do_ld_i32; 919 case INDEX_op_qemu_ld_a64_i32: 920 if (TCG_TARGET_REG_BITS == 64) { 921 tci_args_rrm(insn, &r0, &r1, &oi); 922 taddr = regs[r1]; 923 } else { 924 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 925 taddr = tci_uint64(regs[r2], regs[r1]); 926 oi = regs[r3]; 927 } 928 do_ld_i32: 929 regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr); 930 break; 931 932 case INDEX_op_qemu_ld_a32_i64: 933 if (TCG_TARGET_REG_BITS == 64) { 934 tci_args_rrm(insn, &r0, &r1, &oi); 935 taddr = (uint32_t)regs[r1]; 936 } else { 937 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 938 taddr = (uint32_t)regs[r2]; 939 oi = regs[r3]; 940 } 941 goto do_ld_i64; 942 case INDEX_op_qemu_ld_a64_i64: 943 if (TCG_TARGET_REG_BITS == 64) { 944 tci_args_rrm(insn, &r0, &r1, &oi); 945 taddr = regs[r1]; 946 } else { 947 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 948 taddr = tci_uint64(regs[r3], regs[r2]); 949 oi = regs[r4]; 950 } 951 do_ld_i64: 952 tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr); 953 if (TCG_TARGET_REG_BITS == 32) { 954 tci_write_reg64(regs, r1, r0, tmp64); 955 } else { 956 regs[r0] = tmp64; 957 } 958 break; 959 960 case INDEX_op_qemu_st_a32_i32: 961 tci_args_rrm(insn, &r0, &r1, &oi); 962 taddr = (uint32_t)regs[r1]; 963 goto do_st_i32; 964 case INDEX_op_qemu_st_a64_i32: 965 if (TCG_TARGET_REG_BITS == 64) { 966 tci_args_rrm(insn, &r0, &r1, &oi); 967 taddr = regs[r1]; 968 } else { 969 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 970 taddr = tci_uint64(regs[r2], regs[r1]); 971 oi = regs[r3]; 972 } 973 do_st_i32: 974 tci_qemu_st(env, taddr, 
regs[r0], oi, tb_ptr); 975 break; 976 977 case INDEX_op_qemu_st_a32_i64: 978 if (TCG_TARGET_REG_BITS == 64) { 979 tci_args_rrm(insn, &r0, &r1, &oi); 980 tmp64 = regs[r0]; 981 taddr = (uint32_t)regs[r1]; 982 } else { 983 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 984 tmp64 = tci_uint64(regs[r1], regs[r0]); 985 taddr = (uint32_t)regs[r2]; 986 oi = regs[r3]; 987 } 988 goto do_st_i64; 989 case INDEX_op_qemu_st_a64_i64: 990 if (TCG_TARGET_REG_BITS == 64) { 991 tci_args_rrm(insn, &r0, &r1, &oi); 992 tmp64 = regs[r0]; 993 taddr = regs[r1]; 994 } else { 995 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 996 tmp64 = tci_uint64(regs[r1], regs[r0]); 997 taddr = tci_uint64(regs[r3], regs[r2]); 998 oi = regs[r4]; 999 } 1000 do_st_i64: 1001 tci_qemu_st(env, taddr, tmp64, oi, tb_ptr); 1002 break; 1003 1004 case INDEX_op_mb: 1005 /* Ensure ordering for all kinds */ 1006 smp_mb(); 1007 break; 1008 default: 1009 g_assert_not_reached(); 1010 } 1011 } 1012 } 1013 1014 /* 1015 * Disassembler that matches the interpreter 1016 */ 1017 1018 static const char *str_r(TCGReg r) 1019 { 1020 static const char regs[TCG_TARGET_NB_REGS][4] = { 1021 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 1022 "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp" 1023 }; 1024 1025 QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14); 1026 QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15); 1027 1028 assert((unsigned)r < TCG_TARGET_NB_REGS); 1029 return regs[r]; 1030 } 1031 1032 static const char *str_c(TCGCond c) 1033 { 1034 static const char cond[16][8] = { 1035 [TCG_COND_NEVER] = "never", 1036 [TCG_COND_ALWAYS] = "always", 1037 [TCG_COND_EQ] = "eq", 1038 [TCG_COND_NE] = "ne", 1039 [TCG_COND_LT] = "lt", 1040 [TCG_COND_GE] = "ge", 1041 [TCG_COND_LE] = "le", 1042 [TCG_COND_GT] = "gt", 1043 [TCG_COND_LTU] = "ltu", 1044 [TCG_COND_GEU] = "geu", 1045 [TCG_COND_LEU] = "leu", 1046 [TCG_COND_GTU] = "gtu", 1047 [TCG_COND_TSTEQ] = "tsteq", 1048 [TCG_COND_TSTNE] = "tstne", 1049 }; 1050 1051 assert((unsigned)c < 
ARRAY_SIZE(cond)); 1052 assert(cond[c][0] != 0); 1053 return cond[c]; 1054 } 1055 1056 /* Disassemble TCI bytecode. */ 1057 int print_insn_tci(bfd_vma addr, disassemble_info *info) 1058 { 1059 const uint32_t *tb_ptr = (const void *)(uintptr_t)addr; 1060 const TCGOpDef *def; 1061 const char *op_name; 1062 uint32_t insn; 1063 TCGOpcode op; 1064 TCGReg r0, r1, r2, r3, r4, r5; 1065 tcg_target_ulong i1; 1066 int32_t s2; 1067 TCGCond c; 1068 MemOpIdx oi; 1069 uint8_t pos, len; 1070 void *ptr; 1071 1072 /* TCI is always the host, so we don't need to load indirect. */ 1073 insn = *tb_ptr++; 1074 1075 info->fprintf_func(info->stream, "%08x ", insn); 1076 1077 op = extract32(insn, 0, 8); 1078 def = &tcg_op_defs[op]; 1079 op_name = def->name; 1080 1081 switch (op) { 1082 case INDEX_op_br: 1083 case INDEX_op_exit_tb: 1084 case INDEX_op_goto_tb: 1085 tci_args_l(insn, tb_ptr, &ptr); 1086 info->fprintf_func(info->stream, "%-12s %p", op_name, ptr); 1087 break; 1088 1089 case INDEX_op_goto_ptr: 1090 tci_args_r(insn, &r0); 1091 info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0)); 1092 break; 1093 1094 case INDEX_op_call: 1095 tci_args_nl(insn, tb_ptr, &len, &ptr); 1096 info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr); 1097 break; 1098 1099 case INDEX_op_brcond_i32: 1100 case INDEX_op_brcond_i64: 1101 tci_args_rl(insn, tb_ptr, &r0, &ptr); 1102 info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p", 1103 op_name, str_r(r0), ptr); 1104 break; 1105 1106 case INDEX_op_setcond_i32: 1107 case INDEX_op_setcond_i64: 1108 tci_args_rrrc(insn, &r0, &r1, &r2, &c); 1109 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", 1110 op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c)); 1111 break; 1112 1113 case INDEX_op_tci_movi: 1114 tci_args_ri(insn, &r0, &i1); 1115 info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx, 1116 op_name, str_r(r0), i1); 1117 break; 1118 1119 case INDEX_op_tci_movl: 1120 tci_args_rl(insn, tb_ptr, &r0, &ptr); 1121 
info->fprintf_func(info->stream, "%-12s %s, %p", 1122 op_name, str_r(r0), ptr); 1123 break; 1124 1125 case INDEX_op_ld8u_i32: 1126 case INDEX_op_ld8u_i64: 1127 case INDEX_op_ld8s_i32: 1128 case INDEX_op_ld8s_i64: 1129 case INDEX_op_ld16u_i32: 1130 case INDEX_op_ld16u_i64: 1131 case INDEX_op_ld16s_i32: 1132 case INDEX_op_ld16s_i64: 1133 case INDEX_op_ld32u_i64: 1134 case INDEX_op_ld32s_i64: 1135 case INDEX_op_ld_i32: 1136 case INDEX_op_ld_i64: 1137 case INDEX_op_st8_i32: 1138 case INDEX_op_st8_i64: 1139 case INDEX_op_st16_i32: 1140 case INDEX_op_st16_i64: 1141 case INDEX_op_st32_i64: 1142 case INDEX_op_st_i32: 1143 case INDEX_op_st_i64: 1144 tci_args_rrs(insn, &r0, &r1, &s2); 1145 info->fprintf_func(info->stream, "%-12s %s, %s, %d", 1146 op_name, str_r(r0), str_r(r1), s2); 1147 break; 1148 1149 case INDEX_op_mov_i32: 1150 case INDEX_op_mov_i64: 1151 case INDEX_op_ext8s_i32: 1152 case INDEX_op_ext8s_i64: 1153 case INDEX_op_ext8u_i32: 1154 case INDEX_op_ext8u_i64: 1155 case INDEX_op_ext16s_i32: 1156 case INDEX_op_ext16s_i64: 1157 case INDEX_op_ext16u_i32: 1158 case INDEX_op_ext32s_i64: 1159 case INDEX_op_ext32u_i64: 1160 case INDEX_op_ext_i32_i64: 1161 case INDEX_op_extu_i32_i64: 1162 case INDEX_op_bswap16_i32: 1163 case INDEX_op_bswap16_i64: 1164 case INDEX_op_bswap32_i32: 1165 case INDEX_op_bswap32_i64: 1166 case INDEX_op_bswap64_i64: 1167 case INDEX_op_not_i32: 1168 case INDEX_op_not_i64: 1169 case INDEX_op_neg_i32: 1170 case INDEX_op_neg_i64: 1171 case INDEX_op_ctpop_i32: 1172 case INDEX_op_ctpop_i64: 1173 tci_args_rr(insn, &r0, &r1); 1174 info->fprintf_func(info->stream, "%-12s %s, %s", 1175 op_name, str_r(r0), str_r(r1)); 1176 break; 1177 1178 case INDEX_op_add_i32: 1179 case INDEX_op_add_i64: 1180 case INDEX_op_sub_i32: 1181 case INDEX_op_sub_i64: 1182 case INDEX_op_mul_i32: 1183 case INDEX_op_mul_i64: 1184 case INDEX_op_and_i32: 1185 case INDEX_op_and_i64: 1186 case INDEX_op_or_i32: 1187 case INDEX_op_or_i64: 1188 case INDEX_op_xor_i32: 1189 case 
INDEX_op_xor_i64: 1190 case INDEX_op_andc_i32: 1191 case INDEX_op_andc_i64: 1192 case INDEX_op_orc_i32: 1193 case INDEX_op_orc_i64: 1194 case INDEX_op_eqv_i32: 1195 case INDEX_op_eqv_i64: 1196 case INDEX_op_nand_i32: 1197 case INDEX_op_nand_i64: 1198 case INDEX_op_nor_i32: 1199 case INDEX_op_nor_i64: 1200 case INDEX_op_div_i32: 1201 case INDEX_op_div_i64: 1202 case INDEX_op_rem_i32: 1203 case INDEX_op_rem_i64: 1204 case INDEX_op_divu_i32: 1205 case INDEX_op_divu_i64: 1206 case INDEX_op_remu_i32: 1207 case INDEX_op_remu_i64: 1208 case INDEX_op_shl_i32: 1209 case INDEX_op_shl_i64: 1210 case INDEX_op_shr_i32: 1211 case INDEX_op_shr_i64: 1212 case INDEX_op_sar_i32: 1213 case INDEX_op_sar_i64: 1214 case INDEX_op_rotl_i32: 1215 case INDEX_op_rotl_i64: 1216 case INDEX_op_rotr_i32: 1217 case INDEX_op_rotr_i64: 1218 case INDEX_op_clz_i32: 1219 case INDEX_op_clz_i64: 1220 case INDEX_op_ctz_i32: 1221 case INDEX_op_ctz_i64: 1222 tci_args_rrr(insn, &r0, &r1, &r2); 1223 info->fprintf_func(info->stream, "%-12s %s, %s, %s", 1224 op_name, str_r(r0), str_r(r1), str_r(r2)); 1225 break; 1226 1227 case INDEX_op_deposit_i32: 1228 case INDEX_op_deposit_i64: 1229 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 1230 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d", 1231 op_name, str_r(r0), str_r(r1), str_r(r2), pos, len); 1232 break; 1233 1234 case INDEX_op_extract_i32: 1235 case INDEX_op_extract_i64: 1236 case INDEX_op_sextract_i32: 1237 case INDEX_op_sextract_i64: 1238 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 1239 info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d", 1240 op_name, str_r(r0), str_r(r1), pos, len); 1241 break; 1242 1243 case INDEX_op_movcond_i32: 1244 case INDEX_op_movcond_i64: 1245 case INDEX_op_setcond2_i32: 1246 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c); 1247 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", 1248 op_name, str_r(r0), str_r(r1), str_r(r2), 1249 str_r(r3), str_r(r4), str_c(c)); 1250 break; 1251 1252 case 
INDEX_op_mulu2_i32: 1253 case INDEX_op_mulu2_i64: 1254 case INDEX_op_muls2_i32: 1255 case INDEX_op_muls2_i64: 1256 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 1257 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", 1258 op_name, str_r(r0), str_r(r1), 1259 str_r(r2), str_r(r3)); 1260 break; 1261 1262 case INDEX_op_add2_i32: 1263 case INDEX_op_add2_i64: 1264 case INDEX_op_sub2_i32: 1265 case INDEX_op_sub2_i64: 1266 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 1267 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", 1268 op_name, str_r(r0), str_r(r1), str_r(r2), 1269 str_r(r3), str_r(r4), str_r(r5)); 1270 break; 1271 1272 case INDEX_op_qemu_ld_a32_i32: 1273 case INDEX_op_qemu_st_a32_i32: 1274 len = 1 + 1; 1275 goto do_qemu_ldst; 1276 case INDEX_op_qemu_ld_a32_i64: 1277 case INDEX_op_qemu_st_a32_i64: 1278 case INDEX_op_qemu_ld_a64_i32: 1279 case INDEX_op_qemu_st_a64_i32: 1280 len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); 1281 goto do_qemu_ldst; 1282 case INDEX_op_qemu_ld_a64_i64: 1283 case INDEX_op_qemu_st_a64_i64: 1284 len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); 1285 goto do_qemu_ldst; 1286 do_qemu_ldst: 1287 switch (len) { 1288 case 2: 1289 tci_args_rrm(insn, &r0, &r1, &oi); 1290 info->fprintf_func(info->stream, "%-12s %s, %s, %x", 1291 op_name, str_r(r0), str_r(r1), oi); 1292 break; 1293 case 3: 1294 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 1295 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", 1296 op_name, str_r(r0), str_r(r1), 1297 str_r(r2), str_r(r3)); 1298 break; 1299 case 4: 1300 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 1301 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s", 1302 op_name, str_r(r0), str_r(r1), 1303 str_r(r2), str_r(r3), str_r(r4)); 1304 break; 1305 default: 1306 g_assert_not_reached(); 1307 } 1308 break; 1309 1310 case 0: 1311 /* tcg_out_nop_fill uses zeros */ 1312 if (insn == 0) { 1313 info->fprintf_func(info->stream, "align"); 1314 break; 1315 } 1316 /* fall through */ 1317 1318 
default: 1319 info->fprintf_func(info->stream, "illegal opcode %d", op); 1320 break; 1321 } 1322 1323 return sizeof(insn); 1324 } 1325