xref: /qemu/accel/tcg/translator.c (revision b103cc6e74ac92f070a0e004bd84334e845c20b5)
1 /*
2  * Generic intermediate code generation.
3  *
4  * Copyright (C) 2016-2017 Lluís Vilanova <vilanova@ac.upc.edu>
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/bswap.h"
12 #include "qemu/log.h"
13 #include "qemu/error-report.h"
14 #include "exec/exec-all.h"
15 #include "exec/cpu-ldst-common.h"
16 #include "accel/tcg/cpu-mmu-index.h"
17 #include "exec/translator.h"
18 #include "exec/plugin-gen.h"
19 #include "tcg/tcg-op-common.h"
20 #include "internal-common.h"
21 #include "internal-target.h"
22 #include "disas/disas.h"
23 #include "tb-internal.h"
24 
25 static void set_can_do_io(DisasContextBase *db, bool val)
26 {
27     QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
28     tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
29                     offsetof(ArchCPU, parent_obj.neg.can_do_io) -
30                     offsetof(ArchCPU, env));
31 }
32 
33 bool translator_io_start(DisasContextBase *db)
34 {
35     /*
36      * Ensure that this instruction will be the last in the TB.
37      * The target may override this to something more forceful.
38      */
39     if (db->is_jmp == DISAS_NEXT) {
40         db->is_jmp = DISAS_TOO_MANY;
41     }
42     return true;
43 }
44 
/*
 * Emit the TB prologue: load icount_decr.u32, optionally emit the
 * icount subtraction (with a dummy count, patched by gen_tb_end once
 * the real instruction count is known), and emit the interrupt-exit
 * check unless CF_NOIRQ suppresses it.
 *
 * Returns the TCGOp of the icount subtraction that gen_tb_end must
 * patch, or NULL when CF_USE_ICOUNT is not set.
 */
static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
{
    TCGv_i32 count = NULL;
    TCGOp *icount_start_insn = NULL;

    /* The counter is needed both for icount bookkeeping and the IRQ check. */
    if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
        count = tcg_temp_new_i32();
        tcg_gen_ld_i32(count, tcg_env,
                       offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
                       - offsetof(ArchCPU, env));
    }

    if (cflags & CF_USE_ICOUNT) {
        /*
         * We emit a sub with a dummy immediate argument. Keep the insn index
         * of the sub so that we later (when we know the actual insn count)
         * can update the argument with the actual insn count.
         */
        tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
        icount_start_insn = tcg_last_op();
    }

    /*
     * Emit the check against icount_decr.u32 to see if we should exit
     * unless we suppress the check with CF_NOIRQ. If we are using
     * icount and have suppressed interruption the higher level code
     * should have ensured we don't run more instructions than the
     * budget.
     */
    if (cflags & CF_NOIRQ) {
        tcg_ctx->exitreq_label = NULL;
    } else {
        tcg_ctx->exitreq_label = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
    }

    /* Write back the decremented count (low half only). */
    if (cflags & CF_USE_ICOUNT) {
        tcg_gen_st16_i32(count, tcg_env,
                         offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
                         - offsetof(ArchCPU, env));
    }

    return icount_start_insn;
}
89 
/*
 * Emit the TB epilogue: patch the icount subtraction emitted by
 * gen_tb_start with the real instruction count, and, if an exit-request
 * label was created, bind it and emit the TB exit.
 */
static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
                       TCGOp *icount_start_insn, int num_insns)
{
    if (cflags & CF_USE_ICOUNT) {
        /*
         * Update the num_insn immediate parameter now that we know
         * the actual insn count.
         */
        tcg_set_insn_param(icount_start_insn, 2,
                           tcgv_i32_arg(tcg_constant_i32(num_insns)));
    }

    if (tcg_ctx->exitreq_label) {
        gen_set_label(tcg_ctx->exitreq_label);
        tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
    }
}
107 
108 bool translator_is_same_page(const DisasContextBase *db, vaddr addr)
109 {
110     return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
111 }
112 
113 bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
114 {
115     /* Suppress goto_tb if requested. */
116     if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
117         return false;
118     }
119 
120     /* Check for the dest on the same page as the start of the TB.  */
121     return translator_is_same_page(db, dest);
122 }
123 
/*
 * Generic translation loop: drive the target's TranslatorOps hooks to
 * translate guest code starting at PC into TCG ops for TB.
 *
 * On entry *max_insns is the translation budget; on return it holds the
 * number of instructions actually translated (it is updated every
 * iteration so the caller sees progress even on early termination).
 * HOST_PC is the host mapping of the first guest page.
 */
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                     vaddr pc, void *host_pc, const TranslatorOps *ops,
                     DisasContextBase *db)
{
    uint32_t cflags = tb_cflags(tb);
    TCGOp *icount_start_insn;
    TCGOp *first_insn_start = NULL;
    bool plugin_enabled;

    /* Initialize DisasContext */
    db->tb = tb;
    db->pc_first = pc;
    db->pc_next = pc;
    db->is_jmp = DISAS_NEXT;
    db->num_insns = 0;
    db->max_insns = *max_insns;
    db->insn_start = NULL;
    db->fake_insn = false;
    db->host_addr[0] = host_pc;
    db->host_addr[1] = NULL;
    db->record_start = 0;
    db->record_len = 0;
    db->code_mmuidx = cpu_mmu_index(cpu, true);

    ops->init_disas_context(db, cpu);
    tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

    /* Start translating.  */
    icount_start_insn = gen_tb_start(db, cflags);
    ops->tb_start(db, cpu);
    tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

    plugin_enabled = plugin_gen_tb_start(cpu, db);
    db->plugin_enabled = plugin_enabled;

    while (true) {
        /* Keep the caller's count in sync with our progress. */
        *max_insns = ++db->num_insns;
        ops->insn_start(db, cpu);
        /* Remember the op marking this insn's start, for patching below. */
        db->insn_start = tcg_last_op();
        if (first_insn_start == NULL) {
            first_insn_start = db->insn_start;
        }
        tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

        if (plugin_enabled) {
            plugin_gen_insn_start(cpu, db);
        }

        /*
         * Disassemble one instruction.  The translate_insn hook should
         * update db->pc_next and db->is_jmp to indicate what should be
         * done next -- either exiting this loop or locate the start of
         * the next instruction.
         */
        ops->translate_insn(db, cpu);

        /*
         * We can't instrument after instructions that change control
         * flow although this only really affects post-load operations.
         *
         * Calling plugin_gen_insn_end() before we possibly stop translation
         * is important. Even if this ends up as dead code, plugin generation
         * needs to see a matching plugin_gen_insn_{start,end}() pair in order
         * to accurately track instrumented helpers that might access memory.
         */
        if (plugin_enabled) {
            plugin_gen_insn_end();
        }

        /* Stop translation if translate_insn so indicated.  */
        if (db->is_jmp != DISAS_NEXT) {
            break;
        }

        /* Stop translation if the output buffer is full,
           or we have executed all of the allowed instructions.  */
        if (tcg_op_buf_full() || db->num_insns >= db->max_insns) {
            db->is_jmp = DISAS_TOO_MANY;
            break;
        }
    }

    /* Emit code to exit the TB, as indicated by db->is_jmp.  */
    ops->tb_stop(db, cpu);
    gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);

    /*
     * Manage can_do_io for the translation block: set to false before
     * the first insn and set to true before the last insn.
     */
    if (db->num_insns == 1) {
        tcg_debug_assert(first_insn_start == db->insn_start);
    } else {
        tcg_debug_assert(first_insn_start != db->insn_start);
        /* Insert the "false" store retroactively, before the first insn. */
        tcg_ctx->emit_before_op = first_insn_start;
        set_can_do_io(db, false);
    }
    /* Insert the "true" store retroactively, before the last insn. */
    tcg_ctx->emit_before_op = db->insn_start;
    set_can_do_io(db, true);
    tcg_ctx->emit_before_op = NULL;

    /* May be used by disas_log or plugin callbacks. */
    tb->size = db->pc_next - db->pc_first;
    tb->icount = db->num_insns;

    if (plugin_enabled) {
        plugin_gen_tb_end(cpu, db->num_insns);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(db->pc_first)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "----------------\n");

            /* Let the target log first; fall back to generic disassembly. */
            if (!ops->disas_log ||
                !ops->disas_log(db, cpu, logfile)) {
                fprintf(logfile, "IN: %s\n", lookup_symbol(db->pc_first));
                target_disas(logfile, cpu, db);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
249 
/*
 * Read LEN bytes of guest code at PC into DEST, using the host mappings
 * of the TB's (at most two) guest pages.  Installs and locks the second
 * page's mapping on first use.  Aligned 2/4/8-byte reads that fit in one
 * page are performed atomically where possible.
 *
 * Returns false when the bytes cannot be read via a host mapping (MMIO
 * page); the caller must then use the slow cpu_ld*_code_mmu path, and
 * the TB is capped at the current instruction.
 */
static bool translator_ld(CPUArchState *env, DisasContextBase *db,
                          void *dest, vaddr pc, size_t len)
{
    TranslationBlock *tb = db->tb;
    vaddr last = pc + len - 1;
    void *host;
    vaddr base;

    /* Use slow path if first page is MMIO. */
    if (unlikely(tb_page_addr0(tb) == -1)) {
        /* We capped translation with first page MMIO in tb_gen_code. */
        tcg_debug_assert(db->max_insns == 1);
        return false;
    }

    host = db->host_addr[0];
    base = db->pc_first;

    if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) {
        /* Entire read is from the first page. */
        goto do_read;
    }

    if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) {
        /*
         * Read begins on the first page and extends to the second.
         * The unaligned read is never atomic.
         */
        size_t len0 = -(pc | TARGET_PAGE_MASK);  /* bytes left on page 1 */
        memcpy(dest, host + (pc - base), len0);
        pc += len0;
        dest += len0;
        len -= len0;
    }

    /*
     * The read must conclude on the second page and not extend to a third.
     *
     * TODO: We could allow the two pages to be virtually discontiguous,
     * since we already allow the two pages to be physically discontiguous.
     * The only reasonable use case would be executing an insn at the end
     * of the address space wrapping around to the beginning.  For that,
     * we would need to know the current width of the address space.
     * In the meantime, assert.
     */
    base = (base & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    assert(((base ^ pc) & TARGET_PAGE_MASK) == 0);
    assert(((base ^ last) & TARGET_PAGE_MASK) == 0);
    host = db->host_addr[1];

    if (host == NULL) {
        tb_page_addr_t page0, old_page1, new_page1;

        new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);

        /*
         * If the second page is MMIO, treat as if the first page
         * was MMIO as well, so that we do not cache the TB.
         */
        if (unlikely(new_page1 == -1)) {
            tb_unlock_pages(tb);
            tb_set_page_addr0(tb, -1);
            /* Require that this be the final insn. */
            db->max_insns = db->num_insns;
            return false;
        }

        /*
         * If this is not the first time around, and page1 matches,
         * then we already have the page locked.  Alternately, we're
         * not doing anything to prevent the PTE from changing, so
         * we might wind up with a different page, requiring us to
         * re-do the locking.
         */
        old_page1 = tb_page_addr1(tb);
        if (likely(new_page1 != old_page1)) {
            page0 = tb_page_addr0(tb);
            if (unlikely(old_page1 != -1)) {
                tb_unlock_page1(page0, old_page1);
            }
            tb_set_page_addr1(tb, new_page1);
            tb_lock_page1(page0, new_page1);
        }
        host = db->host_addr[1];
    }

 do_read:
    /*
     * Assume aligned reads should be atomic, if possible.
     * We're not in a position to jump out with EXCP_ATOMIC.
     */
    host += pc - base;
    switch (len) {
    case 2:
        if (QEMU_IS_ALIGNED(pc, 2)) {
            uint16_t t = qatomic_read((uint16_t *)host);
            stw_he_p(dest, t);
            return true;
        }
        break;
    case 4:
        if (QEMU_IS_ALIGNED(pc, 4)) {
            uint32_t t = qatomic_read((uint32_t *)host);
            stl_he_p(dest, t);
            return true;
        }
        break;
#ifdef CONFIG_ATOMIC64
    case 8:
        if (QEMU_IS_ALIGNED(pc, 8)) {
            uint64_t t = qatomic_read__nocheck((uint64_t *)host);
            stq_he_p(dest, t);
            return true;
        }
        break;
#endif
    }
    /* Unaligned or partial read from the second page is not atomic. */
    memcpy(dest, host, len);
    return true;
}
371 
372 static void record_save(DisasContextBase *db, vaddr pc,
373                         const void *from, int size)
374 {
375     int offset;
376 
377     /* Do not record probes before the start of TB. */
378     if (pc < db->pc_first) {
379         return;
380     }
381 
382     /*
383      * In translator_access, we verified that pc is within 2 pages
384      * of pc_first, thus this will never overflow.
385      */
386     offset = pc - db->pc_first;
387 
388     /*
389      * Either the first or second page may be I/O.  If it is the second,
390      * then the first byte we need to record will be at a non-zero offset.
391      * In either case, we should not need to record but a single insn.
392      */
393     if (db->record_len == 0) {
394         db->record_start = offset;
395         db->record_len = size;
396     } else {
397         assert(offset == db->record_start + db->record_len);
398         assert(db->record_len + size <= sizeof(db->record));
399         db->record_len += size;
400     }
401 
402     memcpy(db->record + (offset - db->record_start), from, size);
403 }
404 
405 size_t translator_st_len(const DisasContextBase *db)
406 {
407     return db->fake_insn ? db->record_len : db->tb->size;
408 }
409 
/*
 * Copy LEN bytes of already-translated code at ADDR into DEST.
 * Bytes are served from the host page mappings when available, else
 * from the record_save buffer.  Returns false if the requested range
 * lies outside [pc_first, pc_first + translator_st_len) or cannot be
 * satisfied from either source.
 */
bool translator_st(const DisasContextBase *db, void *dest,
                   vaddr addr, size_t len)
{
    size_t offset, offset_end;

    /* Reject ranges before or beyond the translated region. */
    if (addr < db->pc_first) {
        return false;
    }
    offset = addr - db->pc_first;
    offset_end = offset + len;
    if (offset_end > translator_st_len(db)) {
        return false;
    }

    if (!db->fake_insn) {
        /* Offset of the first byte belonging to the second page. */
        size_t offset_page1 = -(db->pc_first | TARGET_PAGE_MASK);

        /* Get all the bytes from the first page. */
        if (db->host_addr[0]) {
            if (offset_end <= offset_page1) {
                memcpy(dest, db->host_addr[0] + offset, len);
                return true;
            }
            if (offset < offset_page1) {
                size_t len0 = offset_page1 - offset;
                memcpy(dest, db->host_addr[0] + offset, len0);
                offset += len0;
                dest += len0;
            }
        }

        /* Get any bytes from the second page. */
        if (db->host_addr[1] && offset >= offset_page1) {
            memcpy(dest, db->host_addr[1] + (offset - offset_page1),
                   offset_end - offset);
            return true;
        }
    }

    /* Else get recorded bytes. */
    if (db->record_len != 0 &&
        offset >= db->record_start &&
        offset_end <= db->record_start + db->record_len) {
        memcpy(dest, db->record + (offset - db->record_start),
               offset_end - offset);
        return true;
    }
    return false;
}
459 
460 uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
461 {
462     uint8_t val;
463 
464     if (!translator_ld(env, db, &val, pc, sizeof(val))) {
465         MemOpIdx oi = make_memop_idx(MO_UB, db->code_mmuidx);
466         val = cpu_ldb_code_mmu(env, pc, oi, 0);
467         record_save(db, pc, &val, sizeof(val));
468     }
469     return val;
470 }
471 
472 uint16_t translator_lduw_end(CPUArchState *env, DisasContextBase *db,
473                              vaddr pc, MemOp endian)
474 {
475     uint16_t val;
476 
477     if (!translator_ld(env, db, &val, pc, sizeof(val))) {
478         MemOpIdx oi = make_memop_idx(MO_UW, db->code_mmuidx);
479         val = cpu_ldw_code_mmu(env, pc, oi, 0);
480         record_save(db, pc, &val, sizeof(val));
481     }
482     if (endian & MO_BSWAP) {
483         val = bswap16(val);
484     }
485     return val;
486 }
487 
488 uint32_t translator_ldl_end(CPUArchState *env, DisasContextBase *db,
489                             vaddr pc, MemOp endian)
490 {
491     uint32_t val;
492 
493     if (!translator_ld(env, db, &val, pc, sizeof(val))) {
494         MemOpIdx oi = make_memop_idx(MO_UL, db->code_mmuidx);
495         val = cpu_ldl_code_mmu(env, pc, oi, 0);
496         record_save(db, pc, &val, sizeof(val));
497     }
498     if (endian & MO_BSWAP) {
499         val = bswap32(val);
500     }
501     return val;
502 }
503 
504 uint64_t translator_ldq_end(CPUArchState *env, DisasContextBase *db,
505                             vaddr pc, MemOp endian)
506 {
507     uint64_t val;
508 
509     if (!translator_ld(env, db, &val, pc, sizeof(val))) {
510         MemOpIdx oi = make_memop_idx(MO_UQ, db->code_mmuidx);
511         val = cpu_ldq_code_mmu(env, pc, oi, 0);
512         record_save(db, pc, &val, sizeof(val));
513     }
514     if (endian & MO_BSWAP) {
515         val = bswap64(val);
516     }
517     return val;
518 }
519 
/*
 * Install a synthetic instruction: mark the TB as containing a fake
 * insn and stash its LEN bytes at pc_first in the record buffer, so
 * translator_st serves reads from the record rather than guest memory.
 */
void translator_fake_ld(DisasContextBase *db, const void *data, size_t len)
{
    db->fake_insn = true;
    record_save(db, db->pc_first, data, len);
}
525