1 /* 2 * Generic intermediate code generation. 3 * 4 * Copyright (C) 2016-2017 Lluís Vilanova <vilanova@ac.upc.edu> 5 * 6 * This work is licensed under the terms of the GNU GPL, version 2 or later. 7 * See the COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/error-report.h" 13 #include "exec/exec-all.h" 14 #include "exec/translator.h" 15 #include "exec/cpu_ldst.h" 16 #include "exec/plugin-gen.h" 17 #include "exec/cpu_ldst.h" 18 #include "tcg/tcg-op-common.h" 19 #include "internal-target.h" 20 21 static void set_can_do_io(DisasContextBase *db, bool val) 22 { 23 QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1); 24 tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env, 25 offsetof(ArchCPU, parent_obj.neg.can_do_io) - 26 offsetof(ArchCPU, env)); 27 } 28 29 bool translator_io_start(DisasContextBase *db) 30 { 31 /* 32 * Ensure that this instruction will be the last in the TB. 33 * The target may override this to something more forceful. 34 */ 35 if (db->is_jmp == DISAS_NEXT) { 36 db->is_jmp = DISAS_TOO_MANY; 37 } 38 return true; 39 } 40 41 static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags) 42 { 43 TCGv_i32 count = NULL; 44 TCGOp *icount_start_insn = NULL; 45 46 if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) { 47 count = tcg_temp_new_i32(); 48 tcg_gen_ld_i32(count, tcg_env, 49 offsetof(ArchCPU, parent_obj.neg.icount_decr.u32) 50 - offsetof(ArchCPU, env)); 51 } 52 53 if (cflags & CF_USE_ICOUNT) { 54 /* 55 * We emit a sub with a dummy immediate argument. Keep the insn index 56 * of the sub so that we later (when we know the actual insn count) 57 * can update the argument with the actual insn count. 58 */ 59 tcg_gen_sub_i32(count, count, tcg_constant_i32(0)); 60 icount_start_insn = tcg_last_op(); 61 } 62 63 /* 64 * Emit the check against icount_decr.u32 to see if we should exit 65 * unless we suppress the check with CF_NOIRQ. If we are using 66 * icount and have suppressed interruption the higher level code 67 * should have ensured we don't run more instructions than the 68 * budget. 69 */ 70 if (cflags & CF_NOIRQ) { 71 tcg_ctx->exitreq_label = NULL; 72 } else { 73 tcg_ctx->exitreq_label = gen_new_label(); 74 tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label); 75 } 76 77 if (cflags & CF_USE_ICOUNT) { 78 tcg_gen_st16_i32(count, tcg_env, 79 offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low) 80 - offsetof(ArchCPU, env)); 81 } 82 83 return icount_start_insn; 84 } 85 86 static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags, 87 TCGOp *icount_start_insn, int num_insns) 88 { 89 if (cflags & CF_USE_ICOUNT) { 90 /* 91 * Update the num_insn immediate parameter now that we know 92 * the actual insn count. 93 */ 94 tcg_set_insn_param(icount_start_insn, 2, 95 tcgv_i32_arg(tcg_constant_i32(num_insns))); 96 } 97 98 if (tcg_ctx->exitreq_label) { 99 gen_set_label(tcg_ctx->exitreq_label); 100 tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED); 101 } 102 } 103 104 bool translator_use_goto_tb(DisasContextBase *db, vaddr dest) 105 { 106 /* Suppress goto_tb if requested. */ 107 if (tb_cflags(db->tb) & CF_NO_GOTO_TB) { 108 return false; 109 } 110 111 /* Check for the dest on the same page as the start of the TB. */ 112 return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0; 113 } 114 115 void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, 116 vaddr pc, void *host_pc, const TranslatorOps *ops, 117 DisasContextBase *db) 118 { 119 uint32_t cflags = tb_cflags(tb); 120 TCGOp *icount_start_insn; 121 TCGOp *first_insn_start = NULL; 122 bool plugin_enabled; 123 124 /* Initialize DisasContext */ 125 db->tb = tb; 126 db->pc_first = pc; 127 db->pc_next = pc; 128 db->is_jmp = DISAS_NEXT; 129 db->num_insns = 0; 130 db->max_insns = *max_insns; 131 db->singlestep_enabled = cflags & CF_SINGLE_STEP; 132 db->insn_start = NULL; 133 db->fake_insn = false; 134 db->host_addr[0] = host_pc; 135 db->host_addr[1] = NULL; 136 db->record_start = 0; 137 db->record_len = 0; 138 139 ops->init_disas_context(db, cpu); 140 tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ 141 142 /* Start translating. */ 143 icount_start_insn = gen_tb_start(db, cflags); 144 ops->tb_start(db, cpu); 145 tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ 146 147 plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY); 148 db->plugin_enabled = plugin_enabled; 149 150 while (true) { 151 *max_insns = ++db->num_insns; 152 ops->insn_start(db, cpu); 153 db->insn_start = tcg_last_op(); 154 if (first_insn_start == NULL) { 155 first_insn_start = db->insn_start; 156 } 157 tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ 158 159 if (plugin_enabled) { 160 plugin_gen_insn_start(cpu, db); 161 } 162 163 /* 164 * Disassemble one instruction. The translate_insn hook should 165 * update db->pc_next and db->is_jmp to indicate what should be 166 * done next -- either exiting this loop or locate the start of 167 * the next instruction. 168 */ 169 ops->translate_insn(db, cpu); 170 171 /* 172 * We can't instrument after instructions that change control 173 * flow although this only really affects post-load operations. 174 * 175 * Calling plugin_gen_insn_end() before we possibly stop translation 176 * is important. Even if this ends up as dead code, plugin generation 177 * needs to see a matching plugin_gen_insn_{start,end}() pair in order 178 * to accurately track instrumented helpers that might access memory. 179 */ 180 if (plugin_enabled) { 181 plugin_gen_insn_end(); 182 } 183 184 /* Stop translation if translate_insn so indicated. */ 185 if (db->is_jmp != DISAS_NEXT) { 186 break; 187 } 188 189 /* Stop translation if the output buffer is full, 190 or we have executed all of the allowed instructions. */ 191 if (tcg_op_buf_full() || db->num_insns >= db->max_insns) { 192 db->is_jmp = DISAS_TOO_MANY; 193 break; 194 } 195 } 196 197 /* Emit code to exit the TB, as indicated by db->is_jmp. */ 198 ops->tb_stop(db, cpu); 199 gen_tb_end(tb, cflags, icount_start_insn, db->num_insns); 200 201 /* 202 * Manage can_do_io for the translation block: set to false before 203 * the first insn and set to true before the last insn. 204 */ 205 if (db->num_insns == 1) { 206 tcg_debug_assert(first_insn_start == db->insn_start); 207 } else { 208 tcg_debug_assert(first_insn_start != db->insn_start); 209 tcg_ctx->emit_before_op = first_insn_start; 210 set_can_do_io(db, false); 211 } 212 tcg_ctx->emit_before_op = db->insn_start; 213 set_can_do_io(db, true); 214 tcg_ctx->emit_before_op = NULL; 215 216 if (plugin_enabled) { 217 plugin_gen_tb_end(cpu, db->num_insns); 218 } 219 220 /* The disas_log hook may use these values rather than recompute. */ 221 tb->size = db->pc_next - db->pc_first; 222 tb->icount = db->num_insns; 223 224 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) 225 && qemu_log_in_addr_range(db->pc_first)) { 226 FILE *logfile = qemu_log_trylock(); 227 if (logfile) { 228 fprintf(logfile, "----------------\n"); 229 ops->disas_log(db, cpu, logfile); 230 fprintf(logfile, "\n"); 231 qemu_log_unlock(logfile); 232 } 233 } 234 } 235 236 static bool translator_ld(CPUArchState *env, DisasContextBase *db, 237 void *dest, vaddr pc, size_t len) 238 { 239 TranslationBlock *tb = db->tb; 240 vaddr last = pc + len - 1; 241 void *host; 242 vaddr base; 243 244 /* Use slow path if first page is MMIO. */ 245 if (unlikely(tb_page_addr0(tb) == -1)) { 246 /* We capped translation with first page MMIO in tb_gen_code. */ 247 tcg_debug_assert(db->max_insns == 1); 248 return false; 249 } 250 251 host = db->host_addr[0]; 252 base = db->pc_first; 253 254 if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) { 255 /* Entire read is from the first page. */ 256 memcpy(dest, host + (pc - base), len); 257 return true; 258 } 259 260 if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) { 261 /* Read begins on the first page and extends to the second. */ 262 size_t len0 = -(pc | TARGET_PAGE_MASK); 263 memcpy(dest, host + (pc - base), len0); 264 pc += len0; 265 dest += len0; 266 len -= len0; 267 } 268 269 /* 270 * The read must conclude on the second page and not extend to a third. 271 * 272 * TODO: We could allow the two pages to be virtually discontiguous, 273 * since we already allow the two pages to be physically discontiguous. 274 * The only reasonable use case would be executing an insn at the end 275 * of the address space wrapping around to the beginning. For that, 276 * we would need to know the current width of the address space. 277 * In the meantime, assert. 278 */ 279 base = (base & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; 280 assert(((base ^ pc) & TARGET_PAGE_MASK) == 0); 281 assert(((base ^ last) & TARGET_PAGE_MASK) == 0); 282 host = db->host_addr[1]; 283 284 if (host == NULL) { 285 tb_page_addr_t page0, old_page1, new_page1; 286 287 new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]); 288 289 /* 290 * If the second page is MMIO, treat as if the first page 291 * was MMIO as well, so that we do not cache the TB. 292 */ 293 if (unlikely(new_page1 == -1)) { 294 tb_unlock_pages(tb); 295 tb_set_page_addr0(tb, -1); 296 /* Require that this be the final insn. */ 297 db->max_insns = db->num_insns; 298 return false; 299 } 300 301 /* 302 * If this is not the first time around, and page1 matches, 303 * then we already have the page locked. Alternately, we're 304 * not doing anything to prevent the PTE from changing, so 305 * we might wind up with a different page, requiring us to 306 * re-do the locking. 307 */ 308 old_page1 = tb_page_addr1(tb); 309 if (likely(new_page1 != old_page1)) { 310 page0 = tb_page_addr0(tb); 311 if (unlikely(old_page1 != -1)) { 312 tb_unlock_page1(page0, old_page1); 313 } 314 tb_set_page_addr1(tb, new_page1); 315 tb_lock_page1(page0, new_page1); 316 } 317 host = db->host_addr[1]; 318 } 319 320 memcpy(dest, host + (pc - base), len); 321 return true; 322 } 323 324 static void record_save(DisasContextBase *db, vaddr pc, 325 const void *from, int size) 326 { 327 int offset; 328 329 /* Do not record probes before the start of TB. */ 330 if (pc < db->pc_first) { 331 return; 332 } 333 334 /* 335 * In translator_access, we verified that pc is within 2 pages 336 * of pc_first, thus this will never overflow. 337 */ 338 offset = pc - db->pc_first; 339 340 /* 341 * Either the first or second page may be I/O. If it is the second, 342 * then the first byte we need to record will be at a non-zero offset. 343 * In either case, we should not need to record but a single insn. 344 */ 345 if (db->record_len == 0) { 346 db->record_start = offset; 347 db->record_len = size; 348 } else { 349 assert(offset == db->record_start + db->record_len); 350 assert(db->record_len + size <= sizeof(db->record)); 351 db->record_len += size; 352 } 353 354 memcpy(db->record + (offset - db->record_start), from, size); 355 } 356 357 static void plugin_insn_append(vaddr pc, const void *from, size_t size) 358 { 359 #ifdef CONFIG_PLUGIN 360 struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn; 361 size_t off; 362 363 if (insn == NULL) { 364 return; 365 } 366 off = pc - insn->vaddr; 367 if (off < insn->data->len) { 368 g_byte_array_set_size(insn->data, off); 369 } else if (off > insn->data->len) { 370 /* we have an unexpected gap */ 371 g_assert_not_reached(); 372 } 373 374 insn->data = g_byte_array_append(insn->data, from, size); 375 #endif 376 } 377 378 uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc) 379 { 380 uint8_t raw; 381 382 if (!translator_ld(env, db, &raw, pc, sizeof(raw))) { 383 raw = cpu_ldub_code(env, pc); 384 record_save(db, pc, &raw, sizeof(raw)); 385 } 386 plugin_insn_append(pc, &raw, sizeof(raw)); 387 return raw; 388 } 389 390 uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc) 391 { 392 uint16_t raw, tgt; 393 394 if (translator_ld(env, db, &raw, pc, sizeof(raw))) { 395 tgt = tswap16(raw); 396 } else { 397 tgt = cpu_lduw_code(env, pc); 398 raw = tswap16(tgt); 399 record_save(db, pc, &raw, sizeof(raw)); 400 } 401 plugin_insn_append(pc, &raw, sizeof(raw)); 402 return tgt; 403 } 404 405 uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc) 406 { 407 uint32_t raw, tgt; 408 409 if (translator_ld(env, db, &raw, pc, sizeof(raw))) { 410 tgt = tswap32(raw); 411 } else { 412 tgt = cpu_ldl_code(env, pc); 413 raw = tswap32(tgt); 414 record_save(db, pc, &raw, sizeof(raw)); 415 } 416 plugin_insn_append(pc, &raw, sizeof(raw)); 417 return tgt; 418 } 419 420 uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc) 421 { 422 uint64_t raw, tgt; 423 424 if (translator_ld(env, db, &raw, pc, sizeof(raw))) { 425 tgt = tswap64(raw); 426 } else { 427 tgt = cpu_ldq_code(env, pc); 428 raw = tswap64(tgt); 429 record_save(db, pc, &raw, sizeof(raw)); 430 } 431 plugin_insn_append(pc, &raw, sizeof(raw)); 432 return tgt; 433 } 434 435 void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8) 436 { 437 assert(pc >= db->pc_first); 438 db->fake_insn = true; 439 record_save(db, pc, &insn8, sizeof(insn8)); 440 plugin_insn_append(pc, &insn8, sizeof(insn8)); 441 } 442