xref: /qemu/accel/tcg/translator.c (revision 4d3ad3c3ba1f1e9c217d0581e4913a59ef2ac15f)
1 /*
2  * Generic intermediate code generation.
3  *
4  * Copyright (C) 2016-2017 LluĂ­s Vilanova <vilanova@ac.upc.edu>
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/bswap.h"
12 #include "qemu/log.h"
13 #include "qemu/error-report.h"
14 #include "exec/exec-all.h"
15 #include "exec/cpu-ldst-common.h"
16 #include "accel/tcg/cpu-mmu-index.h"
17 #include "exec/translator.h"
18 #include "exec/plugin-gen.h"
19 #include "tcg/tcg-op-common.h"
20 #include "internal-target.h"
21 #include "disas/disas.h"
22 #include "tb-internal.h"
23 
24 static void set_can_do_io(DisasContextBase *db, bool val)
25 {
26     QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
27     tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
28                     offsetof(ArchCPU, parent_obj.neg.can_do_io) -
29                     offsetof(ArchCPU, env));
30 }
31 
32 bool translator_io_start(DisasContextBase *db)
33 {
34     /*
35      * Ensure that this instruction will be the last in the TB.
36      * The target may override this to something more forceful.
37      */
38     if (db->is_jmp == DISAS_NEXT) {
39         db->is_jmp = DISAS_TOO_MANY;
40     }
41     return true;
42 }
43 
/*
 * Emit the TB prologue: load icount_decr.u32, optionally subtract a
 * placeholder instruction count (patched later by gen_tb_end once the
 * real count is known), and branch to tcg_ctx->exitreq_label when an
 * exit has been requested, unless CF_NOIRQ suppresses the check.
 *
 * Returns the sub op whose immediate gen_tb_end() must patch, or NULL
 * when icount is not in use.
 */
static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
{
    TCGv_i32 count = NULL;
    TCGOp *icount_start_insn = NULL;

    /* The loaded counter is needed for icount accounting and/or the
       exit-request check below. */
    if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
        count = tcg_temp_new_i32();
        tcg_gen_ld_i32(count, tcg_env,
                       offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
                       - offsetof(ArchCPU, env));
    }

    if (cflags & CF_USE_ICOUNT) {
        /*
         * We emit a sub with a dummy immediate argument. Keep the insn index
         * of the sub so that we later (when we know the actual insn count)
         * can update the argument with the actual insn count.
         */
        tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
        icount_start_insn = tcg_last_op();
    }

    /*
     * Emit the check against icount_decr.u32 to see if we should exit
     * unless we suppress the check with CF_NOIRQ. If we are using
     * icount and have suppressed interruption the higher level code
     * should have ensured we don't run more instructions than the
     * budget.
     */
    if (cflags & CF_NOIRQ) {
        tcg_ctx->exitreq_label = NULL;
    } else {
        tcg_ctx->exitreq_label = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
    }

    /* Write back the decremented low half of the icount counter. */
    if (cflags & CF_USE_ICOUNT) {
        tcg_gen_st16_i32(count, tcg_env,
                         offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
                         - offsetof(ArchCPU, env));
    }

    return icount_start_insn;
}
88 
/*
 * Emit the TB epilogue: patch the placeholder sub emitted by
 * gen_tb_start() with the final instruction count, and emit the
 * exit-request landing pad if gen_tb_start() created one.
 */
static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
                       TCGOp *icount_start_insn, int num_insns)
{
    if (cflags & CF_USE_ICOUNT) {
        /*
         * Update the num_insn immediate parameter now that we know
         * the actual insn count.
         */
        tcg_set_insn_param(icount_start_insn, 2,
                           tcgv_i32_arg(tcg_constant_i32(num_insns)));
    }

    /* NULL when CF_NOIRQ suppressed the exit-request branch. */
    if (tcg_ctx->exitreq_label) {
        gen_set_label(tcg_ctx->exitreq_label);
        tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
    }
}
106 
107 bool translator_is_same_page(const DisasContextBase *db, vaddr addr)
108 {
109     return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
110 }
111 
112 bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
113 {
114     /* Suppress goto_tb if requested. */
115     if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
116         return false;
117     }
118 
119     /* Check for the dest on the same page as the start of the TB.  */
120     return translator_is_same_page(db, dest);
121 }
122 
/*
 * Generic translation main loop.
 *
 * Initializes *db, emits the TB prologue, then repeatedly calls the
 * target hooks (insn_start / translate_insn) until the target stops
 * translation, the op buffer fills, or max_insns is reached.  Finally
 * emits the TB epilogue, inserts the can_do_io bracketing stores, and
 * optionally logs the disassembly.
 *
 * On return, *max_insns holds the number of instructions translated,
 * and tb->size / tb->icount are filled in.
 */
void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                     vaddr pc, void *host_pc, const TranslatorOps *ops,
                     DisasContextBase *db)
{
    uint32_t cflags = tb_cflags(tb);
    TCGOp *icount_start_insn;
    TCGOp *first_insn_start = NULL;
    bool plugin_enabled;

    /* Initialize DisasContext */
    db->tb = tb;
    db->pc_first = pc;
    db->pc_next = pc;
    db->is_jmp = DISAS_NEXT;
    db->num_insns = 0;
    db->max_insns = *max_insns;
    db->insn_start = NULL;
    db->fake_insn = false;
    db->host_addr[0] = host_pc;
    db->host_addr[1] = NULL;
    db->record_start = 0;
    db->record_len = 0;
    db->code_mmuidx = cpu_mmu_index(cpu, true);

    ops->init_disas_context(db, cpu);
    tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

    /* Start translating.  */
    icount_start_insn = gen_tb_start(db, cflags);
    ops->tb_start(db, cpu);
    tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

    plugin_enabled = plugin_gen_tb_start(cpu, db);
    db->plugin_enabled = plugin_enabled;

    while (true) {
        *max_insns = ++db->num_insns;
        ops->insn_start(db, cpu);
        /* Remember the op marking this insn's start; the first one is
           also kept for the can_do_io insertion below. */
        db->insn_start = tcg_last_op();
        if (first_insn_start == NULL) {
            first_insn_start = db->insn_start;
        }
        tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

        if (plugin_enabled) {
            plugin_gen_insn_start(cpu, db);
        }

        /*
         * Disassemble one instruction.  The translate_insn hook should
         * update db->pc_next and db->is_jmp to indicate what should be
         * done next -- either exiting this loop or locate the start of
         * the next instruction.
         */
        ops->translate_insn(db, cpu);

        /*
         * We can't instrument after instructions that change control
         * flow although this only really affects post-load operations.
         *
         * Calling plugin_gen_insn_end() before we possibly stop translation
         * is important. Even if this ends up as dead code, plugin generation
         * needs to see a matching plugin_gen_insn_{start,end}() pair in order
         * to accurately track instrumented helpers that might access memory.
         */
        if (plugin_enabled) {
            plugin_gen_insn_end();
        }

        /* Stop translation if translate_insn so indicated.  */
        if (db->is_jmp != DISAS_NEXT) {
            break;
        }

        /* Stop translation if the output buffer is full,
           or we have executed all of the allowed instructions.  */
        if (tcg_op_buf_full() || db->num_insns >= db->max_insns) {
            db->is_jmp = DISAS_TOO_MANY;
            break;
        }
    }

    /* Emit code to exit the TB, as indicated by db->is_jmp.  */
    ops->tb_stop(db, cpu);
    gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);

    /*
     * Manage can_do_io for the translation block: set to false before
     * the first insn and set to true before the last insn.
     */
    if (db->num_insns == 1) {
        tcg_debug_assert(first_insn_start == db->insn_start);
    } else {
        tcg_debug_assert(first_insn_start != db->insn_start);
        /* Insert the "false" store before the first insn's ops. */
        tcg_ctx->emit_before_op = first_insn_start;
        set_can_do_io(db, false);
    }
    /* Insert the "true" store before the last insn's ops. */
    tcg_ctx->emit_before_op = db->insn_start;
    set_can_do_io(db, true);
    tcg_ctx->emit_before_op = NULL;

    /* May be used by disas_log or plugin callbacks. */
    tb->size = db->pc_next - db->pc_first;
    tb->icount = db->num_insns;

    if (plugin_enabled) {
        plugin_gen_tb_end(cpu, db->num_insns);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(db->pc_first)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "----------------\n");

            /* Targets may provide their own log; fall back to the
               generic disassembler when they don't (or decline). */
            if (!ops->disas_log ||
                !ops->disas_log(db, cpu, logfile)) {
                fprintf(logfile, "IN: %s\n", lookup_symbol(db->pc_first));
                target_disas(logfile, cpu, db);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
248 
/*
 * Read LEN bytes of guest code at PC into DEST via the direct host
 * mappings in db->host_addr[], faulting in and locking the second
 * page on demand.
 *
 * Returns true on success.  Returns false when the read must go
 * through the slow (MMIO) path instead: either the first page is
 * already MMIO, or the second page turns out to be MMIO (in which
 * case the TB is un-cached and capped at the current insn).
 */
static bool translator_ld(CPUArchState *env, DisasContextBase *db,
                          void *dest, vaddr pc, size_t len)
{
    TranslationBlock *tb = db->tb;
    vaddr last = pc + len - 1;
    void *host;
    vaddr base;

    /* Use slow path if first page is MMIO. */
    if (unlikely(tb_page_addr0(tb) == -1)) {
        /* We capped translation with first page MMIO in tb_gen_code. */
        tcg_debug_assert(db->max_insns == 1);
        return false;
    }

    host = db->host_addr[0];
    base = db->pc_first;

    if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) {
        /* Entire read is from the first page. */
        goto do_read;
    }

    if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) {
        /*
         * Read begins on the first page and extends to the second.
         * The unaligned read is never atomic.
         */
        size_t len0 = -(pc | TARGET_PAGE_MASK);
        memcpy(dest, host + (pc - base), len0);
        pc += len0;
        dest += len0;
        len -= len0;
    }

    /*
     * The read must conclude on the second page and not extend to a third.
     *
     * TODO: We could allow the two pages to be virtually discontiguous,
     * since we already allow the two pages to be physically discontiguous.
     * The only reasonable use case would be executing an insn at the end
     * of the address space wrapping around to the beginning.  For that,
     * we would need to know the current width of the address space.
     * In the meantime, assert.
     */
    base = (base & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    assert(((base ^ pc) & TARGET_PAGE_MASK) == 0);
    assert(((base ^ last) & TARGET_PAGE_MASK) == 0);
    host = db->host_addr[1];

    /* Second page not yet mapped: resolve and lock it now. */
    if (host == NULL) {
        tb_page_addr_t page0, old_page1, new_page1;

        new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);

        /*
         * If the second page is MMIO, treat as if the first page
         * was MMIO as well, so that we do not cache the TB.
         */
        if (unlikely(new_page1 == -1)) {
            tb_unlock_pages(tb);
            tb_set_page_addr0(tb, -1);
            /* Require that this be the final insn. */
            db->max_insns = db->num_insns;
            return false;
        }

        /*
         * If this is not the first time around, and page1 matches,
         * then we already have the page locked.  Alternately, we're
         * not doing anything to prevent the PTE from changing, so
         * we might wind up with a different page, requiring us to
         * re-do the locking.
         */
        old_page1 = tb_page_addr1(tb);
        if (likely(new_page1 != old_page1)) {
            page0 = tb_page_addr0(tb);
            if (unlikely(old_page1 != -1)) {
                tb_unlock_page1(page0, old_page1);
            }
            tb_set_page_addr1(tb, new_page1);
            tb_lock_page1(page0, new_page1);
        }
        host = db->host_addr[1];
    }

 do_read:
    /*
     * Assume aligned reads should be atomic, if possible.
     * We're not in a position to jump out with EXCP_ATOMIC.
     */
    host += pc - base;
    switch (len) {
    case 2:
        if (QEMU_IS_ALIGNED(pc, 2)) {
            uint16_t t = qatomic_read((uint16_t *)host);
            stw_he_p(dest, t);
            return true;
        }
        break;
    case 4:
        if (QEMU_IS_ALIGNED(pc, 4)) {
            uint32_t t = qatomic_read((uint32_t *)host);
            stl_he_p(dest, t);
            return true;
        }
        break;
#ifdef CONFIG_ATOMIC64
    case 8:
        if (QEMU_IS_ALIGNED(pc, 8)) {
            uint64_t t = qatomic_read__nocheck((uint64_t *)host);
            stq_he_p(dest, t);
            return true;
        }
        break;
#endif
    }
    /* Unaligned or partial read from the second page is not atomic. */
    memcpy(dest, host, len);
    return true;
}
370 
371 static void record_save(DisasContextBase *db, vaddr pc,
372                         const void *from, int size)
373 {
374     int offset;
375 
376     /* Do not record probes before the start of TB. */
377     if (pc < db->pc_first) {
378         return;
379     }
380 
381     /*
382      * In translator_access, we verified that pc is within 2 pages
383      * of pc_first, thus this will never overflow.
384      */
385     offset = pc - db->pc_first;
386 
387     /*
388      * Either the first or second page may be I/O.  If it is the second,
389      * then the first byte we need to record will be at a non-zero offset.
390      * In either case, we should not need to record but a single insn.
391      */
392     if (db->record_len == 0) {
393         db->record_start = offset;
394         db->record_len = size;
395     } else {
396         assert(offset == db->record_start + db->record_len);
397         assert(db->record_len + size <= sizeof(db->record));
398         db->record_len += size;
399     }
400 
401     memcpy(db->record + (offset - db->record_start), from, size);
402 }
403 
404 size_t translator_st_len(const DisasContextBase *db)
405 {
406     return db->fake_insn ? db->record_len : db->tb->size;
407 }
408 
409 bool translator_st(const DisasContextBase *db, void *dest,
410                    vaddr addr, size_t len)
411 {
412     size_t offset, offset_end;
413 
414     if (addr < db->pc_first) {
415         return false;
416     }
417     offset = addr - db->pc_first;
418     offset_end = offset + len;
419     if (offset_end > translator_st_len(db)) {
420         return false;
421     }
422 
423     if (!db->fake_insn) {
424         size_t offset_page1 = -(db->pc_first | TARGET_PAGE_MASK);
425 
426         /* Get all the bytes from the first page. */
427         if (db->host_addr[0]) {
428             if (offset_end <= offset_page1) {
429                 memcpy(dest, db->host_addr[0] + offset, len);
430                 return true;
431             }
432             if (offset < offset_page1) {
433                 size_t len0 = offset_page1 - offset;
434                 memcpy(dest, db->host_addr[0] + offset, len0);
435                 offset += len0;
436                 dest += len0;
437             }
438         }
439 
440         /* Get any bytes from the second page. */
441         if (db->host_addr[1] && offset >= offset_page1) {
442             memcpy(dest, db->host_addr[1] + (offset - offset_page1),
443                    offset_end - offset);
444             return true;
445         }
446     }
447 
448     /* Else get recorded bytes. */
449     if (db->record_len != 0 &&
450         offset >= db->record_start &&
451         offset_end <= db->record_start + db->record_len) {
452         memcpy(dest, db->record + (offset - db->record_start),
453                offset_end - offset);
454         return true;
455     }
456     return false;
457 }
458 
459 uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
460 {
461     uint8_t val;
462 
463     if (!translator_ld(env, db, &val, pc, sizeof(val))) {
464         MemOpIdx oi = make_memop_idx(MO_UB, db->code_mmuidx);
465         val = cpu_ldb_code_mmu(env, pc, oi, 0);
466         record_save(db, pc, &val, sizeof(val));
467     }
468     return val;
469 }
470 
471 uint16_t translator_lduw_end(CPUArchState *env, DisasContextBase *db,
472                              vaddr pc, MemOp endian)
473 {
474     uint16_t val;
475 
476     if (!translator_ld(env, db, &val, pc, sizeof(val))) {
477         MemOpIdx oi = make_memop_idx(MO_UW, db->code_mmuidx);
478         val = cpu_ldw_code_mmu(env, pc, oi, 0);
479         record_save(db, pc, &val, sizeof(val));
480     }
481     if (endian & MO_BSWAP) {
482         val = bswap16(val);
483     }
484     return val;
485 }
486 
487 uint32_t translator_ldl_end(CPUArchState *env, DisasContextBase *db,
488                             vaddr pc, MemOp endian)
489 {
490     uint32_t val;
491 
492     if (!translator_ld(env, db, &val, pc, sizeof(val))) {
493         MemOpIdx oi = make_memop_idx(MO_UL, db->code_mmuidx);
494         val = cpu_ldl_code_mmu(env, pc, oi, 0);
495         record_save(db, pc, &val, sizeof(val));
496     }
497     if (endian & MO_BSWAP) {
498         val = bswap32(val);
499     }
500     return val;
501 }
502 
503 uint64_t translator_ldq_end(CPUArchState *env, DisasContextBase *db,
504                             vaddr pc, MemOp endian)
505 {
506     uint64_t val;
507 
508     if (!translator_ld(env, db, &val, pc, sizeof(val))) {
509         MemOpIdx oi = make_memop_idx(MO_UQ, db->code_mmuidx);
510         val = cpu_ldq_code_mmu(env, pc, oi, 0);
511         record_save(db, pc, &val, sizeof(val));
512     }
513     if (endian & MO_BSWAP) {
514         val = bswap64(val);
515     }
516     return val;
517 }
518 
519 void translator_fake_ld(DisasContextBase *db, const void *data, size_t len)
520 {
521     db->fake_insn = true;
522     record_save(db, db->pc_first, data, len);
523 }
524