xref: /qemu/accel/tcg/translate-all.c (revision 4705a71db5909ac5586e87397b2dece132b9e330)
/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "system/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "tb-internal.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "system/cpu-timers.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"

TBContext tb_ctx;

/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
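
/*
 * For illustration (values chosen arbitrarily): -2 encodes as the single
 * byte 0x7e, since bit 6 already carries the sign; 63 encodes as 0x3f,
 * but 64 needs two bytes (0xc0 0x00) because bit 6 of 0x40 would otherwise
 * be read back as a sign bit; 127 likewise becomes 0xff 0x00.
 */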

/*
 * Decode a signed leb128 sequence at *PP; increment *PP past the
 * decoded value.  Return the decoded value.
 */
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }

    *pp = p;
    return val;
}
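
/*
 * Illustrative use only (not compiled): the two helpers round-trip a
 * value, e.g.
 *
 *     uint8_t buf[10], *end = encode_sleb128(buf, -12345);
 *     const uint8_t *q = buf;
 *     int64_t v = decode_sleb128(&q);   -> v == -12345, q == end
 */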

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
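
/*
 * A hypothetical example, for illustration only: with
 * TARGET_INSN_START_WORDS == 1 and !CF_PCREL, a TB at guest pc 0x1000
 * containing two insns at 0x1000 and 0x1004, whose generated code ends
 * at host offsets 40 and 72, is encoded as the delta rows
 * { 0, 40 } and { 4, 32 }.
 */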

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint64_t *insn_data = tcg_ctx->gen_insn_data;
    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        uint64_t prev, curr;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
            } else {
                prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
            }
            curr = insn_data[i * TARGET_INSN_START_WORDS + j];
            p = encode_sleb128(p, curr - prev);
        }
        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
        curr = insn_end_off[i];
        p = encode_sleb128(p, curr - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

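/*
 * Find the guest insn whose generated code covers HOST_PC (adjusted by
 * GETPC_ADJ) within TB.  On success, fill DATA with that insn's
 * TARGET_INSN_START_WORDS insn_start words and return the number of guest
 * insns from that insn to the end of the TB (always >= 1); return -1 if
 * HOST_PC does not fall within the translated code.
 */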
static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}

/*
 * The cpu state corresponding to 'host_pc' is restored in
 * preparation for exiting the TB.
 */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc)
{
    uint64_t data[TARGET_INSN_START_WORDS];
    int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

    if (insns_left < 0) {
        return;
    }

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu->neg.icount_decr.u16.low += insns_left;
    }

    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not, we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}

bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}

void page_init(void)
{
    page_table_config_init();
}

/*
 * Isolate the portion of code gen which can setjmp/longjmp.
 * Return the size of the generated code, or negative on error.
 */
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
                           vaddr pc, void *host_pc,
                           int *max_insns, int64_t *ti)
{
    int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(ret != 0)) {
        return ret;
    }

    tcg_func_start(tcg_ctx);

    CPUState *cs = env_cpu(env);
    tcg_ctx->cpu = cs;
    cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);

    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    *max_insns = tb->icount;

    return tcg_gen_code(tcg_ctx, tb, pc);
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              vaddr pc, uint64_t cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_p2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
    int64_t ti;
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    assert_no_pages_locked();
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    if (!(cflags & CF_PCREL)) {
        tb->pc = pc;
    }
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb_set_page_addr0(tb, phys_pc);
    tb_set_page_addr1(tb, -1);
    if (phys_pc != -1) {
        tb_lock_page0(phys_pc);
    }

    tcg_ctx->gen_tb = tb;
    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
    tcg_ctx->page_bits = TARGET_PAGE_BITS;
    tcg_ctx->page_mask = TARGET_PAGE_MASK;
    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
    tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
#else
    tcg_ctx->guest_mo = TCG_MO_ALL;
#endif

 restart_translate:
    trace_translate_block(tb, pc, tb->tc.ptr);

    gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do tcg_ops->translate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            tb_unlock_pages(tb);
            tcg_ctx->gen_tb = NULL;
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);

            /*
             * The half-sized TB may not cross pages.
             * TODO: Fix all targets that cross pages except with
             * the first insn, at which point this can't be reached.
             */
            phys_p2 = tb_page_addr1(tb);
            if (unlikely(phys_p2 != -1)) {
                tb_unlock_page1(phys_pc, phys_p2);
                tb_set_page_addr1(tb, -1);
            }
            goto restart_translate;

        case -3:
            /*
             * We had a page lock ordering problem.  In order to avoid
             * deadlock we had to drop the lock on page0, which means
             * that everything we translated so far is compromised.
             * Restart with locks held on both pages.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with re-locked pages\n");
            goto restart_translate;

        default:
            g_assert_not_reached();
        }
    }
    tcg_ctx->gen_tb = NULL;

    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        tb_unlock_pages(tb);
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

    /*
     * For CF_PCREL, attribute all executions of the generated code
     * to its first mapping.
     */
    perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    "  -- guest addr 0x%016" PRIx64 " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction. The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, "  -- guest addr 0x%016" PRIx64 "\n",
                            tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, "  -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, "  data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT.  Otherwise a rewind that happens inside the TB might
     * fail to look the TB up via its host PC.
     */
    tcg_tb_insert(tb);

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB.
     *
     * Such TBs must be added to region trees in order to make sure that
     * restore_state_to_opc() - which on some architectures is not limited to
     * rewinding, but also affects exception handling! - is called when such a
     * TB causes an exception.
     *
     * At the same time, temporary one-insn TBs must be executed at most once,
     * because subsequent reads from, e.g., I/O memory may return different
     * values. So return early before attempting to link to other TBs or add
     * to the QHT.
     */
    if (tb_page_addr0(tb) == -1) {
        assert_no_pages_locked();
        return tb;
    }

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb);
    assert_no_pages_locked();

    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

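        /*
         * tcg_tb_alloc() placed the TB structure just before the code
         * buffer, rounded up to the icache line size; stepping
         * code_gen_ptr back past it releases both the TB and the
         * discarded code.
         */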
        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu_env(cpu);
        vaddr pc;
        uint64_t cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = cpu->cc;
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns.  We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.  Also don't let an IRQ sneak
     * in before we execute it.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_NOIRQ | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        vaddr pc = cpu->cc->get_pc(cpu);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}

#endif /* !CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared to allocate the jump cache.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    /* During early initialization, the cache may not yet be allocated. */
    if (unlikely(jc == NULL)) {
        return;
    }

    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        qatomic_set(&jc->array[i].tb, NULL);
    }
}