1 /* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2008 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 
 */

#ifndef TCG_H
#define TCG_H

#include "exec/memop.h"
#include "exec/memopidx.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg/tcg-mo.h"
#include "tcg-target-reg-bits.h"
#include "tcg-target.h"
#include "tcg/tcg-cond.h"
#include "tcg/insn-start-words.h"
#include "tcg/debug-assert.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

/* Longs of scratch space reserved in each TB's stack frame. */
#define CPU_TEMP_BUF_NLONGS 128
#define TCG_STATIC_FRAME_SIZE (CPU_TEMP_BUF_NLONGS * sizeof(long))

/* Integer types matching the width of a host (TCG target) register. */
#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* A set of host registers, one bit per TCGReg. */
#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

/* One enumerator per opcode, expanded from the DEF() lines in tcg-opc.h. */
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)

/*
 * tcg_insn_unit is the unit in which the backend emits code;
 * its size is declared by each tcg-target.h.
 */
#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.
 */
#endif

/*
 * A pending patch of generated code at @ptr, to be resolved once the
 * target label's final address is known.
 */
typedef struct TCGRelocation TCGRelocation;
struct TCGRelocation {
    QSIMPLEQ_ENTRY(TCGRelocation) next;
    tcg_insn_unit *ptr;     /* location in the code buffer to patch */
    intptr_t addend;
    int type;               /* backend-specific relocation type */
};

typedef struct TCGOp TCGOp;

/* One branch opcode that references a label. */
typedef struct TCGLabelUse TCGLabelUse;
struct TCGLabelUse {
    QSIMPLEQ_ENTRY(TCGLabelUse) next;
    TCGOp *op;
};

typedef struct TCGLabel TCGLabel;
struct TCGLabel {
    bool present;
    bool has_value;         /* true once u.value/u.value_ptr is valid */
    uint16_t id;
    union {
        uintptr_t value;
        const tcg_insn_unit *value_ptr;
    } u;
    QSIMPLEQ_HEAD(, TCGLabelUse) branches;   /* ops branching here */
    QSIMPLEQ_HEAD(, TCGRelocation) relocs;   /* code patches pending */
    QSIMPLEQ_ENTRY(TCGLabel) next;
};

/* Chunked bump allocator backing tcg_malloc(). */
typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_I128,

    TCG_TYPE_V64,
    TCG_TYPE_V128,
    TCG_TYPE_V256,

    /* Number of different types (integer not enum) */
#define TCG_TYPE_COUNT (TCG_TYPE_V256 + 1)

    /* An alias for the size of the host register. */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer. */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif
} TCGType;

/**
 * tcg_type_size
 * @t: type
 *
 * Return the size of the type in bytes.
 */
static inline int tcg_type_size(TCGType t)
{
    unsigned i = t;
    if (i >= TCG_TYPE_V64) {
        /*
         * Remap the vector types onto the same power-of-two sequence:
         * V64 -> 8, V128 -> 16, V256 -> 32.
         */
        tcg_debug_assert(i < TCG_TYPE_COUNT);
        i -= TCG_TYPE_V64 - 1;
    }
    return 4 << i;
}

/* A single operand of a TCGOp; wide enough to hold a pointer. */
typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32  : 32 bit integer type
    * TCGv_i64  : 64 bit integer type
    * TCGv_i128 : 128 bit integer type
    * TCGv_ptr  : a host pointer type
    * TCGv_vaddr: an integer type wide enough to hold a target pointer type
    * TCGv_vec  : a host vector type; the exact size is not exposed
                  to the CPU front-end code.
    * TCGv      : an integer type the same size as target_ulong
                  (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.
 */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_i128_d *TCGv_i128;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;

#if __SIZEOF_POINTER__ == 4
typedef TCGv_i32 TCGv_vaddr;
#elif __SIZEOF_POINTER__ == 8
typedef TCGv_i64 TCGv_vaddr;
#else
# error "sizeof pointer is different from {4,8}"
#endif /* __SIZEOF_POINTER__ */

/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0001
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0002
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0004
/* Helper is G_NORETURN. */
#define TCG_CALL_NO_RETURN          0x0008

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/*
 * Flags for the bswap opcodes.
 * If IZ, the input is zero-extended, otherwise unknown.
 * If OZ or OS, the output is zero- or sign-extended respectively,
 * otherwise the high bits are undefined.
 */
enum {
    TCG_BSWAP_IZ = 1,
    TCG_BSWAP_OZ = 2,
    TCG_BSWAP_OS = 4,
};

/* Where the current value of a temporary lives during register allocation. */
typedef enum TCGTempVal {
    TEMP_VAL_DEAD,      /* no live value */
    TEMP_VAL_REG,       /* in a host register */
    TEMP_VAL_MEM,       /* in its backing memory slot */
    TEMP_VAL_CONST,     /* a known constant */
} TCGTempVal;

typedef enum TCGTempKind {
    /*
     * Temp is dead at the end of the extended basic block (EBB),
     * the single-entry multiple-exit region that falls through
     * conditional branches.
     */
    TEMP_EBB,
    /* Temp is live across the entire translation block, but dead at end. */
    TEMP_TB,
    /* Temp is live across the entire translation block, and between them. */
    TEMP_GLOBAL,
    /* Temp is in a fixed register. */
    TEMP_FIXED,
    /* Temp is a fixed constant. */
    TEMP_CONST,
} TCGTempKind;

typedef struct TCGTemp {
    TCGReg reg:8;                   /* host register, if val_type == TEMP_VAL_REG */
    TCGTempVal val_type:8;
    TCGType base_type:8;            /* declared type of the whole temp */
    TCGType type:8;                 /* type of this piece; see temp_subindex */
    TCGTempKind kind:3;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;    /* memory copy matches current value */
    unsigned int mem_allocated:1;   /* mem_base/mem_offset are valid */
    unsigned int temp_allocated:1;
    unsigned int temp_subindex:2;   /* piece index when base_type is split */

    int64_t val;                    /* constant value, for TEMP_VAL_CONST */
    struct TCGTemp *mem_base;       /* base temp for the memory slot */
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately. */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

/* Bitmap over all temporaries of a context. */
typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/*
 * With 1 128-bit output, a 32-bit host requires 4 output parameters,
 * which leaves a maximum of 28 other slots. Which is enough for 7
 * 128-bit operands.
 */
#define DEAD_ARG (1 << 4)
#define SYNC_ARG (1 << 0)
typedef uint32_t TCGLifeData;

struct TCGOp {
    TCGOpcode opc : 8;
    unsigned nargs : 8;

    /* Parameters for this opcode. See below. */
    unsigned param1 : 8;
    unsigned param2 : 8;

    /* Lifetime data of the operands. */
    TCGLifeData life;

    /* Next and previous opcodes. */
    QTAILQ_ENTRY(TCGOp) link;

    /* Register preferences for the output(s). */
    TCGRegSet output_pref[2];

    /* Arguments for the opcode.
 */
    TCGArg args[];
};

/* Accessors for TCGOp.param1/param2; meaning depends on the opcode. */
#define TCGOP_CALLI(X)    (X)->param1
#define TCGOP_CALLO(X)    (X)->param2

#define TCGOP_TYPE(X)     (X)->param1
#define TCGOP_FLAGS(X)    (X)->param2
#define TCGOP_VECE(X)     (X)->param2

/* Make sure operands fit in the bitfields above. */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));

/*
 * Return the register preference for output @i of @op,
 * or the empty set for outputs beyond those tracked.
 */
static inline TCGRegSet output_pref(const TCGOp *op, unsigned i)
{
    return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0;
}

struct TCGContext {
    /* Bump-allocator state backing tcg_malloc(). */
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;
    int nb_ops;
    TCGType addr_type;            /* TCG_TYPE_I32 or TCG_TYPE_I64 */

    int page_mask;
    uint8_t page_bits;
    uint8_t tlb_dyn_max_bits;
    TCGBar guest_mo;              /* memory ordering required by the guest */

    TCGRegSet reserved_regs;
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    TranslationBlock *gen_tb;     /* tb for which code is being generated */
    tcg_insn_unit *code_buf;      /* pointer for start of tb */
    tcg_insn_unit *code_ptr;      /* pointer for running end of tb */

#ifdef CONFIG_DEBUG_TCG
    int goto_tb_issue_mask;
    const TCGOpcode *vecop_list;
#endif

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer. */
    void *code_gen_highwater;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.c.inc.
 */
    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
    struct TCGLabelPoolData *pool_labels;

    TCGLabel *exitreq_label;

#ifdef CONFIG_PLUGIN
    /*
     * We keep one plugin_tb struct per TCGContext. Note that on every TB
     * translation we clear but do not free its contents; this way we
     * avoid a lot of malloc/free churn, since after a few TB's it's
     * unlikely that we'll need to allocate either more instructions or more
     * space for instructions (for variable-instruction-length ISAs).
     */
    struct qemu_plugin_tb *plugin_tb;
    const struct DisasContextBase *plugin_db;

    /* descriptor of the instruction being translated */
    struct qemu_plugin_insn *plugin_insn;
#endif

    /* For host-specific values. */
#ifdef __riscv
    MemOp riscv_cur_vsew;
    TCGType riscv_cur_type;
#endif
    /*
     * During the tcg_reg_alloc_op loop, we are within a sequence of
     * carry-using opcodes like addco+addci.
     */
    bool carry_live;

    GHashTable *const_table[TCG_TYPE_COUNT];
    TCGTempSet free_temps[TCG_TYPE_COUNT];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    QTAILQ_HEAD(, TCGOp) ops, free_ops;
    QSIMPLEQ_HEAD(, TCGLabel) labels;

    /*
     * When clear, new ops are added to the tail of @ops.
     * When set, new ops are added in front of @emit_before_op.
     */
    TCGOp *emit_before_op;

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    uint64_t *gen_insn_data;

    /* Exit to translator on overflow.
 */
    sigjmp_buf jmp_trans;
};

/* True for kinds whose value may not be modified: TEMP_FIXED, TEMP_CONST. */
static inline bool temp_readonly(TCGTemp *ts)
{
    return ts->kind >= TEMP_FIXED;
}

#ifdef CONFIG_USER_ONLY
extern bool tcg_use_softmmu;
#else
#define tcg_use_softmmu true
#endif

extern __thread TCGContext *tcg_ctx;
extern const void *tcg_code_gen_epilogue;
extern uintptr_t tcg_splitwx_diff;
extern TCGv_env tcg_env;

bool in_code_gen_buffer(const void *p);

/*
 * Translate between the writable (rw) and executable (rx) mappings of
 * the code buffer; the two views differ by tcg_splitwx_diff bytes.
 */
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw);
void *tcg_splitwx_to_rw(const void *rx);
#else
static inline const void *tcg_splitwx_to_rx(void *rw)
{
    return rw ? rw + tcg_splitwx_diff : NULL;
}

static inline void *tcg_splitwx_to_rw(const void *rx)
{
    return rx ? (void *)rx - tcg_splitwx_diff : NULL;
}
#endif

/* A TCGTemp is stored in the opcode stream as its pointer value. */
static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}

#ifdef CONFIG_DEBUG_TCG
size_t temp_idx(TCGTemp *ts);
TCGTemp *tcgv_i32_temp(TCGv_i32 v);
#else
static inline size_t temp_idx(TCGTemp *ts)
{
    return ts - tcg_ctx->temps;
}

/*
 * Using the offset of a temporary, relative to TCGContext, rather than
 * its index means that we don't use 0.  That leaves offset 0 free for
 * a NULL representation without having to leave index 0 unused.
516 */ 517 static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v) 518 { 519 return (void *)tcg_ctx + (uintptr_t)v; 520 } 521 #endif 522 523 static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v) 524 { 525 return tcgv_i32_temp((TCGv_i32)v); 526 } 527 528 static inline TCGTemp *tcgv_i128_temp(TCGv_i128 v) 529 { 530 return tcgv_i32_temp((TCGv_i32)v); 531 } 532 533 static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v) 534 { 535 return tcgv_i32_temp((TCGv_i32)v); 536 } 537 538 static inline TCGTemp *tcgv_vec_temp(TCGv_vec v) 539 { 540 return tcgv_i32_temp((TCGv_i32)v); 541 } 542 543 static inline TCGArg tcgv_i32_arg(TCGv_i32 v) 544 { 545 return temp_arg(tcgv_i32_temp(v)); 546 } 547 548 static inline TCGArg tcgv_i64_arg(TCGv_i64 v) 549 { 550 return temp_arg(tcgv_i64_temp(v)); 551 } 552 553 static inline TCGArg tcgv_i128_arg(TCGv_i128 v) 554 { 555 return temp_arg(tcgv_i128_temp(v)); 556 } 557 558 static inline TCGArg tcgv_ptr_arg(TCGv_ptr v) 559 { 560 return temp_arg(tcgv_ptr_temp(v)); 561 } 562 563 static inline TCGArg tcgv_vec_arg(TCGv_vec v) 564 { 565 return temp_arg(tcgv_vec_temp(v)); 566 } 567 568 static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t) 569 { 570 (void)temp_idx(t); /* trigger embedded assert */ 571 return (TCGv_i32)((void *)t - (void *)tcg_ctx); 572 } 573 574 static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t) 575 { 576 return (TCGv_i64)temp_tcgv_i32(t); 577 } 578 579 static inline TCGv_i128 temp_tcgv_i128(TCGTemp *t) 580 { 581 return (TCGv_i128)temp_tcgv_i32(t); 582 } 583 584 static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t) 585 { 586 return (TCGv_ptr)temp_tcgv_i32(t); 587 } 588 589 static inline TCGv_vaddr temp_tcgv_vaddr(TCGTemp *t) 590 { 591 return (TCGv_vaddr)temp_tcgv_i32(t); 592 } 593 594 static inline TCGv_vec temp_tcgv_vec(TCGTemp *t) 595 { 596 return (TCGv_vec)temp_tcgv_i32(t); 597 } 598 599 static inline TCGArg tcg_get_insn_param(TCGOp *op, unsigned arg) 600 { 601 return op->args[arg]; 602 } 603 604 static inline void tcg_set_insn_param(TCGOp *op, unsigned arg, TCGArg 
v) 605 { 606 op->args[arg] = v; 607 } 608 609 static inline uint64_t tcg_get_insn_start_param(TCGOp *op, unsigned arg) 610 { 611 tcg_debug_assert(arg < INSN_START_WORDS); 612 if (TCG_TARGET_REG_BITS == 64) { 613 return tcg_get_insn_param(op, arg); 614 } else { 615 return deposit64(tcg_get_insn_param(op, arg * 2), 32, 32, 616 tcg_get_insn_param(op, arg * 2 + 1)); 617 } 618 } 619 620 static inline void tcg_set_insn_start_param(TCGOp *op, unsigned arg, uint64_t v) 621 { 622 tcg_debug_assert(arg < INSN_START_WORDS); 623 if (TCG_TARGET_REG_BITS == 64) { 624 tcg_set_insn_param(op, arg, v); 625 } else { 626 tcg_set_insn_param(op, arg * 2, v); 627 tcg_set_insn_param(op, arg * 2 + 1, v >> 32); 628 } 629 } 630 631 /* The last op that was emitted. */ 632 static inline TCGOp *tcg_last_op(void) 633 { 634 return QTAILQ_LAST(&tcg_ctx->ops); 635 } 636 637 /* Test for whether to terminate the TB for using too many opcodes. */ 638 static inline bool tcg_op_buf_full(void) 639 { 640 /* This is not a hard limit, it merely stops translation when 641 * we have produced "enough" opcodes. We want to limit TB size 642 * such that a RISC host can reasonably use a 16-bit signed 643 * branch within the TB. We also need to be mindful of the 644 * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[] 645 * and TCGContext.gen_insn_end_off[]. 646 */ 647 return tcg_ctx->nb_ops >= 4000; 648 } 649 650 /* pool based memory allocation */ 651 652 /* user-mode: mmap_lock must be held for tcg_malloc_internal. */ 653 void *tcg_malloc_internal(TCGContext *s, int size); 654 void tcg_pool_reset(TCGContext *s); 655 TranslationBlock *tcg_tb_alloc(TCGContext *s); 656 657 void tcg_region_reset_all(void); 658 659 size_t tcg_code_size(void); 660 size_t tcg_code_capacity(void); 661 662 /** 663 * tcg_tb_insert: 664 * @tb: translation block to insert 665 * 666 * Insert @tb into the region trees. 
 */
void tcg_tb_insert(TranslationBlock *tb);

/**
 * tcg_tb_remove:
 * @tb: translation block to remove
 *
 * Remove @tb from the region trees.
 */
void tcg_tb_remove(TranslationBlock *tb);

/**
 * tcg_tb_lookup:
 * @tc_ptr: host PC to look up
 *
 * Look up a translation block inside the region trees by @tc_ptr. This is
 * useful for exception handling, but must not be used for the purposes of
 * executing the returned translation block. See struct tb_tc for more
 * information.
 *
 * Returns: a translation block previously inserted into the region trees,
 * such that @tc_ptr points anywhere inside the code generated for it, or
 * NULL.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);

/**
 * tcg_tb_foreach:
 * @func: callback
 * @user_data: opaque value to pass to @callback
 *
 * Call @func for each translation block inserted into the region trees.
 */
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);

/**
 * tcg_nb_tbs:
 *
 * Returns: the number of translation blocks inserted into the region trees.
 */
size_t tcg_nb_tbs(void);

/* user-mode: Called with mmap_lock held. */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.
 */
    size = QEMU_ALIGN_UP(size, 8);

    /* Bump-allocate from the current pool chunk; fall back to the
       out-of-line allocator when the chunk is exhausted. */
    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}

void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start);

void tb_target_set_jmp_target(const TranslationBlock *, int,
                              uintptr_t, uintptr_t);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

#define TCG_CT_CONST    1    /* any constant of register size */
#define TCG_CT_REG_ZERO 2    /* zero, in TCG_REG_ZERO */

/* Register/constant constraint for one operand of an opcode. */
typedef struct TCGArgConstraint {
    unsigned ct : 16;
    unsigned alias_index : 4;
    unsigned sort_index : 4;
    unsigned pair_index : 4;
    unsigned pair : 2;  /* 0: none, 1: first, 2: second, 3: second alias */
    bool oalias : 1;
    bool ialias : 1;
    bool newreg : 1;
    TCGRegSet regs;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 16 bits available. */
enum {
    /* Instruction exits the translation block. */
    TCG_OPF_BB_EXIT = 0x01,
    /* Instruction defines the end of a basic block. */
    TCG_OPF_BB_END = 0x02,
    /* Instruction clobbers call registers and potentially update globals. */
    TCG_OPF_CALL_CLOBBER = 0x04,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions. */
    TCG_OPF_SIDE_EFFECTS = 0x08,
    /* Instruction operands may be I32 or I64 */
    TCG_OPF_INT = 0x10,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host. */
    TCG_OPF_NOT_PRESENT = 0x20,
    /* Instruction operands are vectors. */
    TCG_OPF_VECTOR = 0x40,
    /* Instruction is a conditional branch. */
    TCG_OPF_COND_BRANCH = 0x80,
    /* Instruction produces carry out.
 */
    TCG_OPF_CARRY_OUT = 0x100,
    /* Instruction consumes carry in. */
    TCG_OPF_CARRY_IN = 0x200,
};

/* Static description of one opcode: name, operand counts, and flags. */
typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint16_t flags;
} TCGOpDef;

extern const TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

/*
 * tcg_op_supported:
 * Query if @op, for @type and @flags, is supported by the host
 * on which we are currently executing.
 */
bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags);
/*
 * tcg_op_deposit_valid:
 * Query if a deposit into (ofs, len) is supported for @type by
 * the host on which we are currently executing.
 */
bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len);

/* Emit a call to @func with 0..7 operand temporaries. */
void tcg_gen_call0(void *func, TCGHelperInfo *, TCGTemp *ret);
void tcg_gen_call1(void *func, TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
void tcg_gen_call2(void *func, TCGHelperInfo *, TCGTemp *ret,
                   TCGTemp *, TCGTemp *);
void tcg_gen_call3(void *func, TCGHelperInfo *, TCGTemp *ret,
                   TCGTemp *, TCGTemp *, TCGTemp *);
void tcg_gen_call4(void *func, TCGHelperInfo *, TCGTemp *ret,
                   TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
void tcg_gen_call5(void *func, TCGHelperInfo *, TCGTemp *ret,
                   TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
void tcg_gen_call6(void *func, TCGHelperInfo *, TCGTemp *ret,
                   TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *,
                   TCGTemp *, TCGTemp *);
void tcg_gen_call7(void *func, TCGHelperInfo *, TCGTemp *ret,
                   TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *,
                   TCGTemp *, TCGTemp *, TCGTemp *);

TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs);
void tcg_op_remove(TCGContext *s, TCGOp *op);

/**
 * tcg_remove_ops_after:
 * @op: target operation
 *
 * Discard any opcodes emitted since @op.  Expected usage is to save
 * a starting point with tcg_last_op(), speculatively emit opcodes,
 * then decide whether or not to keep those opcodes after the fact.
 */
void tcg_remove_ops_after(TCGOp *op);

void tcg_optimize(TCGContext *s);

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
}

/**
 * tcg_tbrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a difference, from the beginning of the current TB code
 * to the destination address.
 */
static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_IDXMAX    1
#define TB_EXIT_REQUESTED 3

#ifdef CONFIG_TCG_INTERPRETER
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
#else
typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
extern tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

void tcg_register_jit(const void *buf, size_t buf_size);

/* Return zero if the tuple (opc, type, vece) is unsupportable;
   return > 0 if it is directly supportable;
   return < 0 if we must call tcg_expand_vec_op.  */
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);

/* Expand the tuple (opc, type, vece) on the given arguments.
 */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);

/* Replicate a constant C according to the log2 of the element size.  */
uint64_t dup_const(unsigned vece, uint64_t c);

/* Constant-fold the replication at compile time when VECE is constant;
   otherwise fall through to the out-of-line function above. */
#define dup_const(VECE, C)                                         \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
        : (VECE) == MO_64 ? (uint64_t)(C)                          \
        : (qemu_build_not_reached_always(), 0))                    \
     : dup_const(VECE, C))

/* Install @n as the debug list of expected vector opcodes, returning
   the previous list; a no-op without CONFIG_DEBUG_TCG. */
static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
    const TCGOpcode *o = tcg_ctx->vecop_list;
    tcg_ctx->vecop_list = n;
    return o;
#else
    return NULL;
#endif
}

bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);
void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs);

#endif /* TCG_H */