/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
23 */ 24 25 #ifndef TCG_H 26 #define TCG_H 27 28 #include "exec/memop.h" 29 #include "exec/memopidx.h" 30 #include "qemu/bitops.h" 31 #include "qemu/plugin.h" 32 #include "qemu/queue.h" 33 #include "tcg/tcg-mo.h" 34 #include "tcg-target-reg-bits.h" 35 #include "tcg-target.h" 36 #include "tcg/tcg-cond.h" 37 #include "tcg/insn-start-words.h" 38 #include "tcg/debug-assert.h" 39 40 /* XXX: make safe guess about sizes */ 41 #define MAX_OP_PER_INSTR 266 42 43 #define CPU_TEMP_BUF_NLONGS 128 44 #define TCG_STATIC_FRAME_SIZE (CPU_TEMP_BUF_NLONGS * sizeof(long)) 45 46 #if TCG_TARGET_REG_BITS == 32 47 typedef int32_t tcg_target_long; 48 typedef uint32_t tcg_target_ulong; 49 #define TCG_PRIlx PRIx32 50 #define TCG_PRIld PRId32 51 #elif TCG_TARGET_REG_BITS == 64 52 typedef int64_t tcg_target_long; 53 typedef uint64_t tcg_target_ulong; 54 #define TCG_PRIlx PRIx64 55 #define TCG_PRIld PRId64 56 #else 57 #error unsupported 58 #endif 59 60 #if TCG_TARGET_NB_REGS <= 32 61 typedef uint32_t TCGRegSet; 62 #elif TCG_TARGET_NB_REGS <= 64 63 typedef uint64_t TCGRegSet; 64 #else 65 #error unsupported 66 #endif 67 68 typedef enum TCGOpcode { 69 #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name, 70 #include "tcg/tcg-opc.h" 71 #undef DEF 72 NB_OPS, 73 } TCGOpcode; 74 75 #define tcg_regset_set_reg(d, r) ((d) |= (TCGRegSet)1 << (r)) 76 #define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r))) 77 #define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) 78 79 #ifndef TCG_TARGET_INSN_UNIT_SIZE 80 # error "Missing TCG_TARGET_INSN_UNIT_SIZE" 81 #elif TCG_TARGET_INSN_UNIT_SIZE == 1 82 typedef uint8_t tcg_insn_unit; 83 #elif TCG_TARGET_INSN_UNIT_SIZE == 2 84 typedef uint16_t tcg_insn_unit; 85 #elif TCG_TARGET_INSN_UNIT_SIZE == 4 86 typedef uint32_t tcg_insn_unit; 87 #elif TCG_TARGET_INSN_UNIT_SIZE == 8 88 typedef uint64_t tcg_insn_unit; 89 #else 90 /* The port better have done this. 
*/ 91 #endif 92 93 typedef struct TCGRelocation TCGRelocation; 94 struct TCGRelocation { 95 QSIMPLEQ_ENTRY(TCGRelocation) next; 96 tcg_insn_unit *ptr; 97 intptr_t addend; 98 int type; 99 }; 100 101 typedef struct TCGOp TCGOp; 102 typedef struct TCGLabelUse TCGLabelUse; 103 struct TCGLabelUse { 104 QSIMPLEQ_ENTRY(TCGLabelUse) next; 105 TCGOp *op; 106 }; 107 108 typedef struct TCGLabel TCGLabel; 109 struct TCGLabel { 110 bool present; 111 bool has_value; 112 uint16_t id; 113 union { 114 uintptr_t value; 115 const tcg_insn_unit *value_ptr; 116 } u; 117 QSIMPLEQ_HEAD(, TCGLabelUse) branches; 118 QSIMPLEQ_HEAD(, TCGRelocation) relocs; 119 QSIMPLEQ_ENTRY(TCGLabel) next; 120 }; 121 122 typedef struct TCGPool { 123 struct TCGPool *next; 124 int size; 125 uint8_t data[] __attribute__ ((aligned)); 126 } TCGPool; 127 128 #define TCG_POOL_CHUNK_SIZE 32768 129 130 #define TCG_MAX_TEMPS 512 131 #define TCG_MAX_INSNS 512 132 133 /* when the size of the arguments of a called function is smaller than 134 this value, they are statically allocated in the TB stack frame */ 135 #define TCG_STATIC_CALL_ARGS_SIZE 128 136 137 typedef enum TCGType { 138 TCG_TYPE_I32, 139 TCG_TYPE_I64, 140 TCG_TYPE_I128, 141 142 TCG_TYPE_V64, 143 TCG_TYPE_V128, 144 TCG_TYPE_V256, 145 146 /* Number of different types (integer not enum) */ 147 #define TCG_TYPE_COUNT (TCG_TYPE_V256 + 1) 148 149 /* An alias for the size of the host register. */ 150 #if TCG_TARGET_REG_BITS == 32 151 TCG_TYPE_REG = TCG_TYPE_I32, 152 #else 153 TCG_TYPE_REG = TCG_TYPE_I64, 154 #endif 155 156 /* An alias for the size of the native pointer. */ 157 #if UINTPTR_MAX == UINT32_MAX 158 TCG_TYPE_PTR = TCG_TYPE_I32, 159 #else 160 TCG_TYPE_PTR = TCG_TYPE_I64, 161 #endif 162 } TCGType; 163 164 /** 165 * tcg_type_size 166 * @t: type 167 * 168 * Return the size of the type in bytes. 
169 */ 170 static inline int tcg_type_size(TCGType t) 171 { 172 unsigned i = t; 173 if (i >= TCG_TYPE_V64) { 174 tcg_debug_assert(i < TCG_TYPE_COUNT); 175 i -= TCG_TYPE_V64 - 1; 176 } 177 return 4 << i; 178 } 179 180 typedef tcg_target_ulong TCGArg; 181 182 /* Define type and accessor macros for TCG variables. 183 184 TCG variables are the inputs and outputs of TCG ops, as described 185 in tcg/README. Target CPU front-end code uses these types to deal 186 with TCG variables as it emits TCG code via the tcg_gen_* functions. 187 They come in several flavours: 188 * TCGv_i32 : 32 bit integer type 189 * TCGv_i64 : 64 bit integer type 190 * TCGv_i128 : 128 bit integer type 191 * TCGv_ptr : a host pointer type 192 * TCGv_vaddr: an integer type wide enough to hold a target pointer type 193 * TCGv_vec : a host vector type; the exact size is not exposed 194 to the CPU front-end code. 195 * TCGv : an integer type the same size as target_ulong 196 (an alias for either TCGv_i32 or TCGv_i64) 197 The compiler's type checking will complain if you mix them 198 up and pass the wrong sized TCGv to a function. 199 200 Users of tcg_gen_* don't need to know about any of the internal 201 details of these, and should treat them as opaque types. 202 You won't be able to look inside them in a debugger either. 203 204 Internal implementation details follow: 205 206 Note that there is no definition of the structs TCGv_i32_d etc anywhere. 207 This is deliberate, because the values we store in variables of type 208 TCGv_i32 are not really pointers-to-structures. They're just small 209 integers, but keeping them in pointer types like this means that the 210 compiler will complain if you accidentally pass a TCGv_i32 to a 211 function which takes a TCGv_i64, and so on. Only the internals of 212 TCG need to care about the actual contents of the types. 
*/ 213 214 typedef struct TCGv_i32_d *TCGv_i32; 215 typedef struct TCGv_i64_d *TCGv_i64; 216 typedef struct TCGv_i128_d *TCGv_i128; 217 typedef struct TCGv_ptr_d *TCGv_ptr; 218 typedef struct TCGv_vec_d *TCGv_vec; 219 typedef TCGv_ptr TCGv_env; 220 221 #if __SIZEOF_POINTER__ == 4 222 typedef TCGv_i32 TCGv_vaddr; 223 #elif __SIZEOF_POINTER__ == 8 224 typedef TCGv_i64 TCGv_vaddr; 225 #else 226 # error "sizeof pointer is different from {4,8}" 227 #endif /* __SIZEOF_POINTER__ */ 228 229 /* call flags */ 230 /* Helper does not read globals (either directly or through an exception). It 231 implies TCG_CALL_NO_WRITE_GLOBALS. */ 232 #define TCG_CALL_NO_READ_GLOBALS 0x0001 233 /* Helper does not write globals */ 234 #define TCG_CALL_NO_WRITE_GLOBALS 0x0002 235 /* Helper can be safely suppressed if the return value is not used. */ 236 #define TCG_CALL_NO_SIDE_EFFECTS 0x0004 237 /* Helper is G_NORETURN. */ 238 #define TCG_CALL_NO_RETURN 0x0008 239 240 /* convenience version of most used call flags */ 241 #define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS 242 #define TCG_CALL_NO_WG TCG_CALL_NO_WRITE_GLOBALS 243 #define TCG_CALL_NO_SE TCG_CALL_NO_SIDE_EFFECTS 244 #define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE) 245 #define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE) 246 247 /* 248 * Flags for the bswap opcodes. 249 * If IZ, the input is zero-extended, otherwise unknown. 250 * If OZ or OS, the output is zero- or sign-extended respectively, 251 * otherwise the high bits are undefined. 252 */ 253 enum { 254 TCG_BSWAP_IZ = 1, 255 TCG_BSWAP_OZ = 2, 256 TCG_BSWAP_OS = 4, 257 }; 258 259 typedef enum TCGTempVal { 260 TEMP_VAL_DEAD, 261 TEMP_VAL_REG, 262 TEMP_VAL_MEM, 263 TEMP_VAL_CONST, 264 } TCGTempVal; 265 266 typedef enum TCGTempKind { 267 /* 268 * Temp is dead at the end of the extended basic block (EBB), 269 * the single-entry multiple-exit region that falls through 270 * conditional branches. 
271 */ 272 TEMP_EBB, 273 /* Temp is live across the entire translation block, but dead at end. */ 274 TEMP_TB, 275 /* Temp is live across the entire translation block, and between them. */ 276 TEMP_GLOBAL, 277 /* Temp is in a fixed register. */ 278 TEMP_FIXED, 279 /* Temp is a fixed constant. */ 280 TEMP_CONST, 281 } TCGTempKind; 282 283 typedef struct TCGTemp { 284 TCGReg reg:8; 285 TCGTempVal val_type:8; 286 TCGType base_type:8; 287 TCGType type:8; 288 TCGTempKind kind:3; 289 unsigned int indirect_reg:1; 290 unsigned int indirect_base:1; 291 unsigned int mem_coherent:1; 292 unsigned int mem_allocated:1; 293 unsigned int temp_allocated:1; 294 unsigned int temp_subindex:2; 295 296 int64_t val; 297 struct TCGTemp *mem_base; 298 intptr_t mem_offset; 299 const char *name; 300 301 /* Pass-specific information that can be stored for a temporary. 302 One word worth of integer data, and one pointer to data 303 allocated separately. */ 304 uintptr_t state; 305 void *state_ptr; 306 } TCGTemp; 307 308 typedef struct TCGContext TCGContext; 309 310 typedef struct TCGTempSet { 311 unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)]; 312 } TCGTempSet; 313 314 /* 315 * With 1 128-bit output, a 32-bit host requires 4 output parameters, 316 * which leaves a maximum of 28 other slots. Which is enough for 7 317 * 128-bit operands. 318 */ 319 #define DEAD_ARG (1 << 4) 320 #define SYNC_ARG (1 << 0) 321 typedef uint32_t TCGLifeData; 322 323 struct TCGOp { 324 TCGOpcode opc : 8; 325 unsigned nargs : 8; 326 327 /* Parameters for this opcode. See below. */ 328 unsigned param1 : 8; 329 unsigned param2 : 8; 330 331 /* Lifetime data of the operands. */ 332 TCGLifeData life; 333 334 /* Next and previous opcodes. */ 335 QTAILQ_ENTRY(TCGOp) link; 336 337 /* Register preferences for the output(s). */ 338 TCGRegSet output_pref[2]; 339 340 /* Arguments for the opcode. 
*/ 341 TCGArg args[]; 342 }; 343 344 #define TCGOP_CALLI(X) (X)->param1 345 #define TCGOP_CALLO(X) (X)->param2 346 347 #define TCGOP_TYPE(X) (X)->param1 348 #define TCGOP_FLAGS(X) (X)->param2 349 #define TCGOP_VECE(X) (X)->param2 350 351 /* Make sure operands fit in the bitfields above. */ 352 QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8)); 353 354 static inline TCGRegSet output_pref(const TCGOp *op, unsigned i) 355 { 356 return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0; 357 } 358 359 struct TCGContext { 360 uint8_t *pool_cur, *pool_end; 361 TCGPool *pool_first, *pool_current, *pool_first_large; 362 int nb_labels; 363 int nb_globals; 364 int nb_temps; 365 int nb_indirects; 366 int nb_ops; 367 TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */ 368 TCGBar guest_mo; 369 370 TCGRegSet reserved_regs; 371 intptr_t current_frame_offset; 372 intptr_t frame_start; 373 intptr_t frame_end; 374 TCGTemp *frame_temp; 375 376 TranslationBlock *gen_tb; /* tb for which code is being generated */ 377 tcg_insn_unit *code_buf; /* pointer for start of tb */ 378 tcg_insn_unit *code_ptr; /* pointer for running end of tb */ 379 380 #ifdef CONFIG_DEBUG_TCG 381 int goto_tb_issue_mask; 382 const TCGOpcode *vecop_list; 383 #endif 384 385 /* Code generation. Note that we specifically do not use tcg_insn_unit 386 here, because there's too much arithmetic throughout that relies 387 on addition and subtraction working on bytes. Rely on the GCC 388 extension that allows arithmetic on void*. */ 389 void *code_gen_buffer; 390 size_t code_gen_buffer_size; 391 void *code_gen_ptr; 392 void *data_gen_ptr; 393 394 /* Threshold to flush the translated code buffer. */ 395 void *code_gen_highwater; 396 397 /* Track which vCPU triggers events */ 398 CPUState *cpu; /* *_trans */ 399 400 /* These structures are private to tcg-target.c.inc. 
*/ 401 QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels; 402 struct TCGLabelPoolData *pool_labels; 403 404 TCGLabel *exitreq_label; 405 406 #ifdef CONFIG_PLUGIN 407 /* 408 * We keep one plugin_tb struct per TCGContext. Note that on every TB 409 * translation we clear but do not free its contents; this way we 410 * avoid a lot of malloc/free churn, since after a few TB's it's 411 * unlikely that we'll need to allocate either more instructions or more 412 * space for instructions (for variable-instruction-length ISAs). 413 */ 414 struct qemu_plugin_tb *plugin_tb; 415 const struct DisasContextBase *plugin_db; 416 417 /* descriptor of the instruction being translated */ 418 struct qemu_plugin_insn *plugin_insn; 419 #endif 420 421 /* For host-specific values. */ 422 #ifdef __riscv 423 MemOp riscv_cur_vsew; 424 TCGType riscv_cur_type; 425 #endif 426 /* 427 * During the tcg_reg_alloc_op loop, we are within a sequence of 428 * carry-using opcodes like addco+addci. 429 */ 430 bool carry_live; 431 432 GHashTable *const_table[TCG_TYPE_COUNT]; 433 TCGTempSet free_temps[TCG_TYPE_COUNT]; 434 TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ 435 436 QTAILQ_HEAD(, TCGOp) ops, free_ops; 437 QSIMPLEQ_HEAD(, TCGLabel) labels; 438 439 /* 440 * When clear, new ops are added to the tail of @ops. 441 * When set, new ops are added in front of @emit_before_op. 442 */ 443 TCGOp *emit_before_op; 444 445 /* Tells which temporary holds a given register. 446 It does not take into account fixed registers */ 447 TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS]; 448 449 uint16_t gen_insn_end_off[TCG_MAX_INSNS]; 450 uint64_t *gen_insn_data; 451 452 /* Exit to translator on overflow. 
*/ 453 sigjmp_buf jmp_trans; 454 }; 455 456 static inline bool temp_readonly(TCGTemp *ts) 457 { 458 return ts->kind >= TEMP_FIXED; 459 } 460 461 #ifdef CONFIG_USER_ONLY 462 extern bool tcg_use_softmmu; 463 #else 464 #define tcg_use_softmmu true 465 #endif 466 467 extern __thread TCGContext *tcg_ctx; 468 extern const void *tcg_code_gen_epilogue; 469 extern uintptr_t tcg_splitwx_diff; 470 extern TCGv_env tcg_env; 471 472 bool in_code_gen_buffer(const void *p); 473 474 #ifdef CONFIG_DEBUG_TCG 475 const void *tcg_splitwx_to_rx(void *rw); 476 void *tcg_splitwx_to_rw(const void *rx); 477 #else 478 static inline const void *tcg_splitwx_to_rx(void *rw) 479 { 480 return rw ? rw + tcg_splitwx_diff : NULL; 481 } 482 483 static inline void *tcg_splitwx_to_rw(const void *rx) 484 { 485 return rx ? (void *)rx - tcg_splitwx_diff : NULL; 486 } 487 #endif 488 489 static inline TCGArg temp_arg(TCGTemp *ts) 490 { 491 return (uintptr_t)ts; 492 } 493 494 static inline TCGTemp *arg_temp(TCGArg a) 495 { 496 return (TCGTemp *)(uintptr_t)a; 497 } 498 499 #ifdef CONFIG_DEBUG_TCG 500 size_t temp_idx(TCGTemp *ts); 501 TCGTemp *tcgv_i32_temp(TCGv_i32 v); 502 #else 503 static inline size_t temp_idx(TCGTemp *ts) 504 { 505 return ts - tcg_ctx->temps; 506 } 507 508 /* 509 * Using the offset of a temporary, relative to TCGContext, rather than 510 * its index means that we don't use 0. That leaves offset 0 free for 511 * a NULL representation without having to leave index 0 unused. 
512 */ 513 static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v) 514 { 515 return (void *)tcg_ctx + (uintptr_t)v; 516 } 517 #endif 518 519 static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v) 520 { 521 return tcgv_i32_temp((TCGv_i32)v); 522 } 523 524 static inline TCGTemp *tcgv_i128_temp(TCGv_i128 v) 525 { 526 return tcgv_i32_temp((TCGv_i32)v); 527 } 528 529 static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v) 530 { 531 return tcgv_i32_temp((TCGv_i32)v); 532 } 533 534 static inline TCGTemp *tcgv_vec_temp(TCGv_vec v) 535 { 536 return tcgv_i32_temp((TCGv_i32)v); 537 } 538 539 static inline TCGArg tcgv_i32_arg(TCGv_i32 v) 540 { 541 return temp_arg(tcgv_i32_temp(v)); 542 } 543 544 static inline TCGArg tcgv_i64_arg(TCGv_i64 v) 545 { 546 return temp_arg(tcgv_i64_temp(v)); 547 } 548 549 static inline TCGArg tcgv_i128_arg(TCGv_i128 v) 550 { 551 return temp_arg(tcgv_i128_temp(v)); 552 } 553 554 static inline TCGArg tcgv_ptr_arg(TCGv_ptr v) 555 { 556 return temp_arg(tcgv_ptr_temp(v)); 557 } 558 559 static inline TCGArg tcgv_vec_arg(TCGv_vec v) 560 { 561 return temp_arg(tcgv_vec_temp(v)); 562 } 563 564 static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t) 565 { 566 (void)temp_idx(t); /* trigger embedded assert */ 567 return (TCGv_i32)((void *)t - (void *)tcg_ctx); 568 } 569 570 static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t) 571 { 572 return (TCGv_i64)temp_tcgv_i32(t); 573 } 574 575 static inline TCGv_i128 temp_tcgv_i128(TCGTemp *t) 576 { 577 return (TCGv_i128)temp_tcgv_i32(t); 578 } 579 580 static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t) 581 { 582 return (TCGv_ptr)temp_tcgv_i32(t); 583 } 584 585 static inline TCGv_vaddr temp_tcgv_vaddr(TCGTemp *t) 586 { 587 return (TCGv_vaddr)temp_tcgv_i32(t); 588 } 589 590 static inline TCGv_vec temp_tcgv_vec(TCGTemp *t) 591 { 592 return (TCGv_vec)temp_tcgv_i32(t); 593 } 594 595 static inline TCGArg tcg_get_insn_param(TCGOp *op, unsigned arg) 596 { 597 return op->args[arg]; 598 } 599 600 static inline void tcg_set_insn_param(TCGOp *op, unsigned arg, TCGArg 
v) 601 { 602 op->args[arg] = v; 603 } 604 605 static inline uint64_t tcg_get_insn_start_param(TCGOp *op, unsigned arg) 606 { 607 tcg_debug_assert(arg < INSN_START_WORDS); 608 if (TCG_TARGET_REG_BITS == 64) { 609 return tcg_get_insn_param(op, arg); 610 } else { 611 return deposit64(tcg_get_insn_param(op, arg * 2), 32, 32, 612 tcg_get_insn_param(op, arg * 2 + 1)); 613 } 614 } 615 616 static inline void tcg_set_insn_start_param(TCGOp *op, unsigned arg, uint64_t v) 617 { 618 tcg_debug_assert(arg < INSN_START_WORDS); 619 if (TCG_TARGET_REG_BITS == 64) { 620 tcg_set_insn_param(op, arg, v); 621 } else { 622 tcg_set_insn_param(op, arg * 2, v); 623 tcg_set_insn_param(op, arg * 2 + 1, v >> 32); 624 } 625 } 626 627 /* The last op that was emitted. */ 628 static inline TCGOp *tcg_last_op(void) 629 { 630 return QTAILQ_LAST(&tcg_ctx->ops); 631 } 632 633 /* Test for whether to terminate the TB for using too many opcodes. */ 634 static inline bool tcg_op_buf_full(void) 635 { 636 /* This is not a hard limit, it merely stops translation when 637 * we have produced "enough" opcodes. We want to limit TB size 638 * such that a RISC host can reasonably use a 16-bit signed 639 * branch within the TB. We also need to be mindful of the 640 * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[] 641 * and TCGContext.gen_insn_end_off[]. 642 */ 643 return tcg_ctx->nb_ops >= 4000; 644 } 645 646 /* pool based memory allocation */ 647 648 /* user-mode: mmap_lock must be held for tcg_malloc_internal. */ 649 void *tcg_malloc_internal(TCGContext *s, int size); 650 void tcg_pool_reset(TCGContext *s); 651 TranslationBlock *tcg_tb_alloc(TCGContext *s); 652 653 void tcg_region_reset_all(void); 654 655 size_t tcg_code_size(void); 656 size_t tcg_code_capacity(void); 657 658 /** 659 * tcg_tb_insert: 660 * @tb: translation block to insert 661 * 662 * Insert @tb into the region trees. 
663 */ 664 void tcg_tb_insert(TranslationBlock *tb); 665 666 /** 667 * tcg_tb_remove: 668 * @tb: translation block to remove 669 * 670 * Remove @tb from the region trees. 671 */ 672 void tcg_tb_remove(TranslationBlock *tb); 673 674 /** 675 * tcg_tb_lookup: 676 * @tc_ptr: host PC to look up 677 * 678 * Look up a translation block inside the region trees by @tc_ptr. This is 679 * useful for exception handling, but must not be used for the purposes of 680 * executing the returned translation block. See struct tb_tc for more 681 * information. 682 * 683 * Returns: a translation block previously inserted into the region trees, 684 * such that @tc_ptr points anywhere inside the code generated for it, or 685 * NULL. 686 */ 687 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr); 688 689 /** 690 * tcg_tb_foreach: 691 * @func: callback 692 * @user_data: opaque value to pass to @callback 693 * 694 * Call @func for each translation block inserted into the region trees. 695 */ 696 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data); 697 698 /** 699 * tcg_nb_tbs: 700 * 701 * Returns: the number of translation blocks inserted into the region trees. 702 */ 703 size_t tcg_nb_tbs(void); 704 705 /* user-mode: Called with mmap_lock held. */ 706 static inline void *tcg_malloc(int size) 707 { 708 TCGContext *s = tcg_ctx; 709 uint8_t *ptr, *ptr_end; 710 711 /* ??? This is a weak placeholder for minimum malloc alignment. 
*/ 712 size = QEMU_ALIGN_UP(size, 8); 713 714 ptr = s->pool_cur; 715 ptr_end = ptr + size; 716 if (unlikely(ptr_end > s->pool_end)) { 717 return tcg_malloc_internal(tcg_ctx, size); 718 } else { 719 s->pool_cur = ptr_end; 720 return ptr; 721 } 722 } 723 724 void tcg_func_start(TCGContext *s); 725 726 int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start); 727 728 void tb_target_set_jmp_target(const TranslationBlock *, int, 729 uintptr_t, uintptr_t); 730 731 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size); 732 733 #define TCG_CT_CONST 1 /* any constant of register size */ 734 #define TCG_CT_REG_ZERO 2 /* zero, in TCG_REG_ZERO */ 735 736 typedef struct TCGArgConstraint { 737 unsigned ct : 16; 738 unsigned alias_index : 4; 739 unsigned sort_index : 4; 740 unsigned pair_index : 4; 741 unsigned pair : 2; /* 0: none, 1: first, 2: second, 3: second alias */ 742 bool oalias : 1; 743 bool ialias : 1; 744 bool newreg : 1; 745 TCGRegSet regs; 746 } TCGArgConstraint; 747 748 #define TCG_MAX_OP_ARGS 16 749 750 /* Bits for TCGOpDef->flags, 8 bits available, all used. */ 751 enum { 752 /* Instruction exits the translation block. */ 753 TCG_OPF_BB_EXIT = 0x01, 754 /* Instruction defines the end of a basic block. */ 755 TCG_OPF_BB_END = 0x02, 756 /* Instruction clobbers call registers and potentially update globals. */ 757 TCG_OPF_CALL_CLOBBER = 0x04, 758 /* Instruction has side effects: it cannot be removed if its outputs 759 are not used, and might trigger exceptions. */ 760 TCG_OPF_SIDE_EFFECTS = 0x08, 761 /* Instruction operands may be I32 or I64 */ 762 TCG_OPF_INT = 0x10, 763 /* Instruction is optional and not implemented by the host, or insn 764 is generic and should not be implemented by the host. */ 765 TCG_OPF_NOT_PRESENT = 0x20, 766 /* Instruction operands are vectors. */ 767 TCG_OPF_VECTOR = 0x40, 768 /* Instruction is a conditional branch. */ 769 TCG_OPF_COND_BRANCH = 0x80, 770 /* Instruction produces carry out. 
*/ 771 TCG_OPF_CARRY_OUT = 0x100, 772 /* Instruction consumes carry in. */ 773 TCG_OPF_CARRY_IN = 0x200, 774 }; 775 776 typedef struct TCGOpDef { 777 const char *name; 778 uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args; 779 uint16_t flags; 780 } TCGOpDef; 781 782 extern const TCGOpDef tcg_op_defs[]; 783 extern const size_t tcg_op_defs_max; 784 785 /* 786 * tcg_op_supported: 787 * Query if @op, for @type and @flags, is supported by the host 788 * on which we are currently executing. 789 */ 790 bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags); 791 /* 792 * tcg_op_deposit_valid: 793 * Query if a deposit into (ofs, len) is supported for @type by 794 * the host on which we are currently executing. 795 */ 796 bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len); 797 798 void tcg_gen_call0(void *func, TCGHelperInfo *, TCGTemp *ret); 799 void tcg_gen_call1(void *func, TCGHelperInfo *, TCGTemp *ret, TCGTemp *); 800 void tcg_gen_call2(void *func, TCGHelperInfo *, TCGTemp *ret, 801 TCGTemp *, TCGTemp *); 802 void tcg_gen_call3(void *func, TCGHelperInfo *, TCGTemp *ret, 803 TCGTemp *, TCGTemp *, TCGTemp *); 804 void tcg_gen_call4(void *func, TCGHelperInfo *, TCGTemp *ret, 805 TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *); 806 void tcg_gen_call5(void *func, TCGHelperInfo *, TCGTemp *ret, 807 TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *); 808 void tcg_gen_call6(void *func, TCGHelperInfo *, TCGTemp *ret, 809 TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *, 810 TCGTemp *, TCGTemp *); 811 void tcg_gen_call7(void *func, TCGHelperInfo *, TCGTemp *ret, 812 TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *, 813 TCGTemp *, TCGTemp *, TCGTemp *); 814 815 TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs); 816 void tcg_op_remove(TCGContext *s, TCGOp *op); 817 818 /** 819 * tcg_remove_ops_after: 820 * @op: target operation 821 * 822 * Discard any opcodes emitted since @op. 
Expected usage is to save 823 * a starting point with tcg_last_op(), speculatively emit opcodes, 824 * then decide whether or not to keep those opcodes after the fact. 825 */ 826 void tcg_remove_ops_after(TCGOp *op); 827 828 void tcg_optimize(TCGContext *s); 829 830 TCGLabel *gen_new_label(void); 831 832 /** 833 * label_arg 834 * @l: label 835 * 836 * Encode a label for storage in the TCG opcode stream. 837 */ 838 839 static inline TCGArg label_arg(TCGLabel *l) 840 { 841 return (uintptr_t)l; 842 } 843 844 /** 845 * arg_label 846 * @i: value 847 * 848 * The opposite of label_arg. Retrieve a label from the 849 * encoding of the TCG opcode stream. 850 */ 851 852 static inline TCGLabel *arg_label(TCGArg i) 853 { 854 return (TCGLabel *)(uintptr_t)i; 855 } 856 857 /** 858 * tcg_ptr_byte_diff 859 * @a, @b: addresses to be differenced 860 * 861 * There are many places within the TCG backends where we need a byte 862 * difference between two pointers. While this can be accomplished 863 * with local casting, it's easy to get wrong -- especially if one is 864 * concerned with the signedness of the result. 865 * 866 * This version relies on GCC's void pointer arithmetic to get the 867 * correct result. 868 */ 869 870 static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b) 871 { 872 return a - b; 873 } 874 875 /** 876 * tcg_pcrel_diff 877 * @s: the tcg context 878 * @target: address of the target 879 * 880 * Produce a pc-relative difference, from the current code_ptr 881 * to the destination address. 882 */ 883 884 static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target) 885 { 886 return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr)); 887 } 888 889 /** 890 * tcg_tbrel_diff 891 * @s: the tcg context 892 * @target: address of the target 893 * 894 * Produce a difference, from the beginning of the current TB code 895 * to the destination address. 
896 */ 897 static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target) 898 { 899 return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf)); 900 } 901 902 /** 903 * tcg_current_code_size 904 * @s: the tcg context 905 * 906 * Compute the current code size within the translation block. 907 * This is used to fill in qemu's data structures for goto_tb. 908 */ 909 910 static inline size_t tcg_current_code_size(TCGContext *s) 911 { 912 return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); 913 } 914 915 /** 916 * tcg_qemu_tb_exec: 917 * @env: pointer to CPUArchState for the CPU 918 * @tb_ptr: address of generated code for the TB to execute 919 * 920 * Start executing code from a given translation block. 921 * Where translation blocks have been linked, execution 922 * may proceed from the given TB into successive ones. 923 * Control eventually returns only when some action is needed 924 * from the top-level loop: either control must pass to a TB 925 * which has not yet been directly linked, or an asynchronous 926 * event such as an interrupt needs handling. 927 * 928 * Return: The return value is the value passed to the corresponding 929 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute. 930 * The value is either zero or a 4-byte aligned pointer to that TB combined 931 * with additional information in its two least significant bits. The 932 * additional information is encoded as follows: 933 * 0, 1: the link between this TB and the next is via the specified 934 * TB index (0 or 1). That is, we left the TB via (the equivalent 935 * of) "goto_tb <index>". The main loop uses this to determine 936 * how to link the TB just executed to the next. 937 * 2: we are using instruction counting code generation, and we 938 * did not start executing this TB because the instruction counter 939 * would hit zero midway through it. 
In this case the pointer 940 * returned is the TB we were about to execute, and the caller must 941 * arrange to execute the remaining count of instructions. 942 * 3: we stopped because the CPU's exit_request flag was set 943 * (usually meaning that there is an interrupt that needs to be 944 * handled). The pointer returned is the TB we were about to execute 945 * when we noticed the pending exit request. 946 * 947 * If the bottom two bits indicate an exit-via-index then the CPU 948 * state is correctly synchronised and ready for execution of the next 949 * TB (and in particular the guest PC is the address to execute next). 950 * Otherwise, we gave up on execution of this TB before it started, and 951 * the caller must fix up the CPU state by calling the CPU's 952 * synchronize_from_tb() method with the TB pointer we return (falling 953 * back to calling the CPU's set_pc method with tb->pb if no 954 * synchronize_from_tb() method exists). 955 * 956 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec 957 * to this default (which just calls the prologue.code emitted by 958 * tcg_target_qemu_prologue()). 959 */ 960 #define TB_EXIT_MASK 3 961 #define TB_EXIT_IDX0 0 962 #define TB_EXIT_IDX1 1 963 #define TB_EXIT_IDXMAX 1 964 #define TB_EXIT_REQUESTED 3 965 966 #ifdef CONFIG_TCG_INTERPRETER 967 uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr); 968 #else 969 typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr); 970 extern tcg_prologue_fn *tcg_qemu_tb_exec; 971 #endif 972 973 void tcg_register_jit(const void *buf, size_t buf_size); 974 975 /* Return zero if the tuple (opc, type, vece) is unsupportable; 976 return > 0 if it is directly supportable; 977 return < 0 if we must call tcg_expand_vec_op. */ 978 int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned); 979 980 /* Expand the tuple (opc, type, vece) on the given arguments. 
*/ 981 void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...); 982 983 /* Replicate a constant C according to the log2 of the element size. */ 984 uint64_t dup_const(unsigned vece, uint64_t c); 985 986 #define dup_const(VECE, C) \ 987 (__builtin_constant_p(VECE) \ 988 ? ( (VECE) == MO_8 ? 0x0101010101010101ull * (uint8_t)(C) \ 989 : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C) \ 990 : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C) \ 991 : (VECE) == MO_64 ? (uint64_t)(C) \ 992 : (qemu_build_not_reached_always(), 0)) \ 993 : dup_const(VECE, C)) 994 995 static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n) 996 { 997 #ifdef CONFIG_DEBUG_TCG 998 const TCGOpcode *o = tcg_ctx->vecop_list; 999 tcg_ctx->vecop_list = n; 1000 return o; 1001 #else 1002 return NULL; 1003 #endif 1004 } 1005 1006 bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned); 1007 void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs); 1008 1009 #endif /* TCG_H */ 1010