/* /qemu/accel/tcg/translator.c (revision fc524567087c2537b5103cdfc1d41e4f442892b6) */
/*
 * Generic intermediate code generation.
 *
 * Copyright (C) 2016-2017 Lluís Vilanova <vilanova@ac.upc.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/bswap.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "accel/tcg/cpu-ldst-common.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/target_page.h"
#include "exec/translator.h"
#include "exec/plugin-gen.h"
#include "tcg/tcg-op-common.h"
#include "internal-common.h"
#include "disas/disas.h"
#include "tb-internal.h"
static void set_can_do_io(DisasContextBase *db, bool val)
{
    QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
    tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
                    offsetof(CPUState, neg.can_do_io) - sizeof(CPUState));
}

bool translator_io_start(DisasContextBase *db)
{
    /*
     * Ensure that this instruction will be the last in the TB.
     * The target may override this to something more forceful.
     */
    if (db->is_jmp == DISAS_NEXT) {
        db->is_jmp = DISAS_TOO_MANY;
    }
    return true;
}
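
/*
 * Usage sketch (hypothetical target code, not part of this file): a target
 * calls translator_io_start() before emitting an operation whose side
 * effects depend on precise instruction accounting, e.g.
 *
 *     translator_io_start(&dc->base);
 *     gen_helper_read_timer(dest, tcg_env);
 *
 * where "dc" and "gen_helper_read_timer" are placeholders for the target's
 * DisasContext and a timing-sensitive helper.
 */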

static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
{
    TCGv_i32 count = NULL;
    TCGOp *icount_start_insn = NULL;

    if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
        count = tcg_temp_new_i32();
        tcg_gen_ld_i32(count, tcg_env,
                       offsetof(CPUState, neg.icount_decr.u32) -
                       sizeof(CPUState));
    }

    if (cflags & CF_USE_ICOUNT) {
        /*
         * We emit a sub with a dummy immediate argument.  Keep a pointer
         * to the sub op so that we can update the argument once the
         * actual insn count is known (in gen_tb_end).
         */
        tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
        icount_start_insn = tcg_last_op();
    }

    /*
     * Emit the check against icount_decr.u32 to see if we should exit,
     * unless we suppress the check with CF_NOIRQ.  If we are using icount
     * and have suppressed interruption, the higher-level code should have
     * ensured we don't run more instructions than the budget.
     */
    if (cflags & CF_NOIRQ) {
        tcg_ctx->exitreq_label = NULL;
    } else {
        tcg_ctx->exitreq_label = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
    }

    if (cflags & CF_USE_ICOUNT) {
        tcg_gen_st16_i32(count, tcg_env,
                         offsetof(CPUState, neg.icount_decr.u16.low) -
                         sizeof(CPUState));
    }

    return icount_start_insn;
}
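
/*
 * Illustrative sketch of the prologue emitted above for a TB with
 * CF_USE_ICOUNT set and CF_NOIRQ clear (roughly, in TCG op terms):
 *
 *     ld_i32   count, env, offsetof(icount_decr.u32) - sizeof(CPUState)
 *     sub_i32  count, count, <dummy, patched to num_insns in gen_tb_end>
 *     brcond_i32  count < 0 ? goto exitreq_label
 *     st16_i32 count, env, offsetof(icount_decr.u16.low) - sizeof(CPUState)
 *
 * With CF_NOIRQ the branch is omitted; without CF_USE_ICOUNT only the
 * load and the branch remain.
 */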

static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
                       TCGOp *icount_start_insn, int num_insns)
{
    if (cflags & CF_USE_ICOUNT) {
        /*
         * Update the num_insn immediate parameter now that we know
         * the actual insn count.
         */
        tcg_set_insn_param(icount_start_insn, 2,
                           tcgv_i32_arg(tcg_constant_i32(num_insns)));
    }

    if (tcg_ctx->exitreq_label) {
        gen_set_label(tcg_ctx->exitreq_label);
        tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
    }
}

bool translator_is_same_page(const DisasContextBase *db, vaddr addr)
{
    return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
}

bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
{
    /* Suppress goto_tb if requested. */
    if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
        return false;
    }

    /* Check for the dest on the same page as the start of the TB.  */
    return translator_is_same_page(db, dest);
}
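
/*
 * Usage sketch (hypothetical target code; "dc" and "gen_set_pc" are
 * placeholders): a target's direct-branch emission typically looks like
 *
 *     if (translator_use_goto_tb(&dc->base, dest)) {
 *         tcg_gen_goto_tb(0);
 *         gen_set_pc(dc, dest);
 *         tcg_gen_exit_tb(dc->base.tb, 0);
 *     } else {
 *         gen_set_pc(dc, dest);
 *         tcg_gen_lookup_and_goto_ptr();
 *     }
 */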

void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                     vaddr pc, void *host_pc, const TranslatorOps *ops,
                     DisasContextBase *db)
{
    uint32_t cflags = tb_cflags(tb);
    TCGOp *icount_start_insn;
    TCGOp *first_insn_start = NULL;
    bool plugin_enabled;

    /* Initialize DisasContext */
    db->tb = tb;
    db->pc_first = pc;
    db->pc_next = pc;
    db->is_jmp = DISAS_NEXT;
    db->num_insns = 0;
    db->max_insns = *max_insns;
    db->insn_start = NULL;
    db->fake_insn = false;
    db->host_addr[0] = host_pc;
    db->host_addr[1] = NULL;
    db->record_start = 0;
    db->record_len = 0;
    db->code_mmuidx = cpu_mmu_index(cpu, true);

    ops->init_disas_context(db, cpu);
    tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

    /* Start translating.  */
    icount_start_insn = gen_tb_start(db, cflags);
    ops->tb_start(db, cpu);
    tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

    plugin_enabled = plugin_gen_tb_start(cpu, db);
    db->plugin_enabled = plugin_enabled;

    while (true) {
        *max_insns = ++db->num_insns;
        ops->insn_start(db, cpu);
        db->insn_start = tcg_last_op();
        if (first_insn_start == NULL) {
            first_insn_start = db->insn_start;
        }
        tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */

        if (plugin_enabled) {
            plugin_gen_insn_start(cpu, db);
        }

        /*
         * Disassemble one instruction.  The translate_insn hook should
         * update db->pc_next and db->is_jmp to indicate what should be
         * done next -- either exiting this loop or locating the start of
         * the next instruction.
         */
        ops->translate_insn(db, cpu);

        /*
         * We can't instrument after instructions that change control
         * flow, although this only really affects post-load operations.
         *
         * Calling plugin_gen_insn_end() before we possibly stop translation
         * is important. Even if this ends up as dead code, plugin generation
         * needs to see a matching plugin_gen_insn_{start,end}() pair in order
         * to accurately track instrumented helpers that might access memory.
         */
        if (plugin_enabled) {
            plugin_gen_insn_end();
        }

        /* Stop translation if translate_insn so indicated.  */
        if (db->is_jmp != DISAS_NEXT) {
            break;
        }

        /*
         * Stop translation if the output buffer is full, or we have
         * translated all of the allowed instructions.
         */
        if (tcg_op_buf_full() || db->num_insns >= db->max_insns) {
            db->is_jmp = DISAS_TOO_MANY;
            break;
        }
    }

    /* Emit code to exit the TB, as indicated by db->is_jmp.  */
    ops->tb_stop(db, cpu);
    gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);

    /*
     * Manage can_do_io for the translation block: set to false before
     * the first insn and set to true before the last insn.
     */
    if (db->num_insns == 1) {
        tcg_debug_assert(first_insn_start == db->insn_start);
    } else {
        tcg_debug_assert(first_insn_start != db->insn_start);
        tcg_ctx->emit_before_op = first_insn_start;
        set_can_do_io(db, false);
    }
    tcg_ctx->emit_before_op = db->insn_start;
    set_can_do_io(db, true);
    tcg_ctx->emit_before_op = NULL;
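
    /*
     * Illustrative sketch (not emitted literally): for a hypothetical
     * three-insn TB, the two inserts above leave the op stream looking
     * roughly like
     *
     *     st8  can_do_io = 0       <- before insn 1
     *     <ops for insn 1>
     *     <ops for insn 2>
     *     st8  can_do_io = 1       <- before insn 3, the last insn
     *     <ops for insn 3>
     *     <tb_stop / exit ops>
     *
     * while a single-insn TB only receives the "can_do_io = 1" store.
     */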

    /* May be used by disas_log or plugin callbacks. */
    tb->size = db->pc_next - db->pc_first;
    tb->icount = db->num_insns;

    if (plugin_enabled) {
        plugin_gen_tb_end(cpu, db->num_insns);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(db->pc_first)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "----------------\n");

            if (!ops->disas_log ||
                !ops->disas_log(db, cpu, logfile)) {
                fprintf(logfile, "IN: %s\n", lookup_symbol(db->pc_first));
                target_disas(logfile, cpu, db);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
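
/*
 * Usage sketch (hypothetical target "foo"; all names below are
 * placeholders, not part of this file): a target wires its hooks into a
 * TranslatorOps table and hands it to translator_loop() from its
 * code-generation entry point:
 *
 *     static const TranslatorOps foo_tr_ops = {
 *         .init_disas_context = foo_tr_init_disas_context,
 *         .tb_start           = foo_tr_tb_start,
 *         .insn_start         = foo_tr_insn_start,
 *         .translate_insn     = foo_tr_translate_insn,
 *         .tb_stop            = foo_tr_tb_stop,
 *     };
 *
 *     void foo_translate_code(CPUState *cs, TranslationBlock *tb,
 *                             int *max_insns, vaddr pc, void *host_pc)
 *     {
 *         DisasContext dc;   // target-private, embeds DisasContextBase
 *
 *         translator_loop(cs, tb, max_insns, pc, host_pc,
 *                         &foo_tr_ops, &dc.base);
 *     }
 */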

static bool translator_ld(CPUArchState *env, DisasContextBase *db,
                          void *dest, vaddr pc, size_t len)
{
    TranslationBlock *tb = db->tb;
    vaddr last = pc + len - 1;
    void *host;
    vaddr base;

    /* Use slow path if first page is MMIO. */
    if (unlikely(tb_page_addr0(tb) == -1)) {
        /* We capped translation with first page MMIO in tb_gen_code. */
        tcg_debug_assert(db->max_insns == 1);
        return false;
    }

    host = db->host_addr[0];
    base = db->pc_first;

    if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) {
        /* Entire read is from the first page. */
        goto do_read;
    }

    if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) {
        /*
         * Read begins on the first page and extends to the second.
         * The unaligned read is never atomic.
         */
        size_t len0 = -(pc | TARGET_PAGE_MASK);
        memcpy(dest, host + (pc - base), len0);
        pc += len0;
        dest += len0;
        len -= len0;
    }
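
    /*
     * Worked example of the split above (purely illustrative, assuming a
     * 4KiB TARGET_PAGE_SIZE): for pc = 0x1ffe and len = 4,
     * -(pc | TARGET_PAGE_MASK) evaluates to 2, so the first two bytes are
     * copied from the first page and the remaining two bytes are read
     * from the second page below.
     */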

    /*
     * The read must conclude on the second page and not extend to a third.
     *
     * TODO: We could allow the two pages to be virtually discontiguous,
     * since we already allow the two pages to be physically discontiguous.
     * The only reasonable use case would be executing an insn at the end
     * of the address space wrapping around to the beginning.  For that,
     * we would need to know the current width of the address space.
     * In the meantime, assert.
     */
    base = (base & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    assert(((base ^ pc) & TARGET_PAGE_MASK) == 0);
    assert(((base ^ last) & TARGET_PAGE_MASK) == 0);
    host = db->host_addr[1];

    if (host == NULL) {
        tb_page_addr_t page0, old_page1, new_page1;

        new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);

        /*
         * If the second page is MMIO, treat as if the first page
         * was MMIO as well, so that we do not cache the TB.
         */
        if (unlikely(new_page1 == -1)) {
            tb_unlock_pages(tb);
            tb_set_page_addr0(tb, -1);
            /* Require that this be the final insn. */
            db->max_insns = db->num_insns;
            return false;
        }

        /*
         * If this is not the first time around, and page1 matches,
         * then we already have the page locked.  Alternatively, we're
         * not doing anything to prevent the PTE from changing, so
         * we might wind up with a different page, requiring us to
         * re-do the locking.
         */
        old_page1 = tb_page_addr1(tb);
        if (likely(new_page1 != old_page1)) {
            page0 = tb_page_addr0(tb);
            if (unlikely(old_page1 != -1)) {
                tb_unlock_page1(page0, old_page1);
            }
            tb_set_page_addr1(tb, new_page1);
            tb_lock_page1(page0, new_page1);
        }
        host = db->host_addr[1];
    }

 do_read:
    /*
     * Assume aligned reads should be atomic, if possible.
     * We're not in a position to jump out with EXCP_ATOMIC.
     */
    host += pc - base;
    switch (len) {
    case 2:
        if (QEMU_IS_ALIGNED(pc, 2)) {
            uint16_t t = qatomic_read((uint16_t *)host);
            stw_he_p(dest, t);
            return true;
        }
        break;
    case 4:
        if (QEMU_IS_ALIGNED(pc, 4)) {
            uint32_t t = qatomic_read((uint32_t *)host);
            stl_he_p(dest, t);
            return true;
        }
        break;
#ifdef CONFIG_ATOMIC64
    case 8:
        if (QEMU_IS_ALIGNED(pc, 8)) {
            uint64_t t = qatomic_read__nocheck((uint64_t *)host);
            stq_he_p(dest, t);
            return true;
        }
        break;
#endif
    }
    /* Unaligned or partial read from the second page is not atomic. */
    memcpy(dest, host, len);
    return true;
}

static void record_save(DisasContextBase *db, vaddr pc,
                        const void *from, int size)
{
    int offset;

    /* Do not record probes before the start of TB. */
    if (pc < db->pc_first) {
        return;
    }

    /*
     * In translator_ld, we verified that pc is within 2 pages
     * of pc_first, thus this will never overflow.
     */
    offset = pc - db->pc_first;

    /*
     * Either the first or second page may be I/O.  If it is the second,
     * then the first byte we need to record will be at a non-zero offset.
     * In either case, we should never need to record more than a single insn.
     */
    if (db->record_len == 0) {
        db->record_start = offset;
        db->record_len = size;
    } else {
        assert(offset == db->record_start + db->record_len);
        assert(db->record_len + size <= sizeof(db->record));
        db->record_len += size;
    }

    memcpy(db->record + (offset - db->record_start), from, size);
}

size_t translator_st_len(const DisasContextBase *db)
{
    return db->fake_insn ? db->record_len : db->tb->size;
}

bool translator_st(const DisasContextBase *db, void *dest,
                   vaddr addr, size_t len)
{
    size_t offset, offset_end;

    if (addr < db->pc_first) {
        return false;
    }
    offset = addr - db->pc_first;
    offset_end = offset + len;
    if (offset_end > translator_st_len(db)) {
        return false;
    }

    if (!db->fake_insn) {
        size_t offset_page1 = -(db->pc_first | TARGET_PAGE_MASK);

        /* Get all the bytes from the first page. */
        if (db->host_addr[0]) {
            if (offset_end <= offset_page1) {
                memcpy(dest, db->host_addr[0] + offset, len);
                return true;
            }
            if (offset < offset_page1) {
                size_t len0 = offset_page1 - offset;
                memcpy(dest, db->host_addr[0] + offset, len0);
                offset += len0;
                dest += len0;
            }
        }

        /* Get any bytes from the second page. */
        if (db->host_addr[1] && offset >= offset_page1) {
            memcpy(dest, db->host_addr[1] + (offset - offset_page1),
                   offset_end - offset);
            return true;
        }
    }

    /* Else get recorded bytes. */
    if (db->record_len != 0 &&
        offset >= db->record_start &&
        offset_end <= db->record_start + db->record_len) {
        memcpy(dest, db->record + (offset - db->record_start),
               offset_end - offset);
        return true;
    }
    return false;
}
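
/*
 * Usage sketch (illustrative only; "insn_pc" is a placeholder): a caller
 * that wants the raw guest bytes of an instruction already covered by
 * this TB can do
 *
 *     uint32_t raw;
 *     if (translator_st(db, &raw, insn_pc, sizeof(raw))) {
 *         ... raw holds the bytes as they appear in guest memory ...
 *     }
 *
 * which is served from the cached host pages in the normal case, or from
 * the record[] fallback for MMIO and fake instructions.
 */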

uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
{
    uint8_t val;

    if (!translator_ld(env, db, &val, pc, sizeof(val))) {
        MemOpIdx oi = make_memop_idx(MO_UB, db->code_mmuidx);
        val = cpu_ldb_code_mmu(env, pc, oi, 0);
        record_save(db, pc, &val, sizeof(val));
    }
    return val;
}

uint16_t translator_lduw_end(CPUArchState *env, DisasContextBase *db,
                             vaddr pc, MemOp endian)
{
    uint16_t val;

    if (!translator_ld(env, db, &val, pc, sizeof(val))) {
        MemOpIdx oi = make_memop_idx(MO_UW, db->code_mmuidx);
        val = cpu_ldw_code_mmu(env, pc, oi, 0);
        record_save(db, pc, &val, sizeof(val));
    }
    if (endian & MO_BSWAP) {
        val = bswap16(val);
    }
    return val;
}

uint32_t translator_ldl_end(CPUArchState *env, DisasContextBase *db,
                            vaddr pc, MemOp endian)
{
    uint32_t val;

    if (!translator_ld(env, db, &val, pc, sizeof(val))) {
        MemOpIdx oi = make_memop_idx(MO_UL, db->code_mmuidx);
        val = cpu_ldl_code_mmu(env, pc, oi, 0);
        record_save(db, pc, &val, sizeof(val));
    }
    if (endian & MO_BSWAP) {
        val = bswap32(val);
    }
    return val;
}

uint64_t translator_ldq_end(CPUArchState *env, DisasContextBase *db,
                            vaddr pc, MemOp endian)
{
    uint64_t val;

    if (!translator_ld(env, db, &val, pc, sizeof(val))) {
        MemOpIdx oi = make_memop_idx(MO_UQ, db->code_mmuidx);
        val = cpu_ldq_code_mmu(env, pc, oi, 0);
        record_save(db, pc, &val, sizeof(val));
    }
    if (endian & MO_BSWAP) {
        val = bswap64(val);
    }
    return val;
}
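
/*
 * Fetch sketch (hypothetical target code; "dc" is a placeholder for the
 * target's DisasContext): a fixed-width, little-endian target would fetch
 * its next instruction word from translate_insn with
 *
 *     uint32_t insn = translator_ldl_end(env, &dc->base,
 *                                        dc->base.pc_next, MO_LE);
 *     dc->base.pc_next += 4;
 *
 * The MO_BSWAP handling above makes the same call work for a big-endian
 * encoding by passing MO_BE instead.
 */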

void translator_fake_ld(DisasContextBase *db, const void *data, size_t len)
{
    db->fake_insn = true;
    record_save(db, db->pc_first, data, len);
}