xref: /qemu/accel/tcg/translate-all.c (revision 072e057ed90d6bbc4f01ac04e627e63f275f57f0)
/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "system/ram_addr.h"
#endif

#include "cpu-param.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "tb-internal.h"
#include "tlb-bounds.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "exec/icount.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
#include "internal-target.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"
#include "cpu.h"

TBContext tb_ctx;

/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
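
/*
 * Worked example (illustrative): encoding the value -2 emits the single
 * byte 0x7e, since after the first iteration val becomes -1 and bit 0x40
 * of the byte is set, so the loop stops.  Encoding 64 (0x40) needs two
 * bytes, 0xc0 0x00, because bit 0x40 of the first byte would otherwise
 * be read back as a sign bit.
 */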

/*
 * Decode a signed leb128 sequence at *PP; increment *PP past the
 * decoded value.  Return the decoded value.
 */
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
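    /*
     * Sign-extend when the final byte has its sign bit (0x40) set and the
     * accumulated value does not already cover the full width.
     */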
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
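
/*
 * Illustrative example (assuming one insn_start word and !CF_PCREL): a TB
 * at guest pc 0x1000 containing two insns at 0x1000 and 0x1004, whose
 * generated code ends at offsets 0x30 and 0x48 from tb->tc.ptr, encodes
 * the rows as deltas from the seed and from the previous row:
 *     { 0x1000 - 0x1000, 0x30 - 0x00 } = { 0, 0x30 }
 *     { 0x1004 - 0x1000, 0x48 - 0x30 } = { 4, 0x18 }
 * with each delta emitted by encode_sleb128().
 */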

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint64_t *insn_data = tcg_ctx->gen_insn_data;
    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        uint64_t prev, curr;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
            } else {
                prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
            }
            curr = insn_data[i * TARGET_INSN_START_WORDS + j];
            p = encode_sleb128(p, curr - prev);
        }
        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
        curr = insn_end_off[i];
        p = encode_sleb128(p, curr - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

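    /*
     * The return address typically points just past the call insn; back it
     * up by GETPC_ADJ so the comparison below lands inside that call.
     */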
    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}

/*
 * The cpu state corresponding to 'host_pc' is restored in
 * preparation for exiting the TB.
 */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc)
{
    uint64_t data[TARGET_INSN_START_WORDS];
    int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

    if (insns_left < 0) {
        return;
    }

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu->neg.icount_decr.u16.low += insns_left;
    }

    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not, we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early, as we can't resolve it here.
     */
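    /*
     * host_pc points into the executable (rx) view of the code buffer;
     * subtracting tcg_splitwx_diff converts it back to the writable view
     * that in_code_gen_buffer() checks against.
     */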
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}

bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}

void page_init(void)
{
    page_table_config_init();
}

/*
 * Isolate the portion of code gen which can setjmp/longjmp.
 * Return the size of the generated code, or negative on error.
 */
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
                           vaddr pc, void *host_pc,
                           int *max_insns, int64_t *ti)
{
    int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(ret != 0)) {
        return ret;
    }

    tcg_func_start(tcg_ctx);

    CPUState *cs = env_cpu(env);
    tcg_ctx->cpu = cs;
    cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);

    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    *max_insns = tb->icount;

    return tcg_gen_code(tcg_ctx, tb, pc);
}
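
/*
 * A negative result reaches the caller either through siglongjmp() into
 * jmp_trans or as the return value of tcg_gen_code(); tb_gen_code() below
 * interprets -1 as a code_gen_buffer overflow, -2 as a too-large TB and
 * -3 as a page-lock ordering problem.
 */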

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              vaddr pc, uint64_t cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_p2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
    int64_t ti;
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    assert_no_pages_locked();
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    if (!(cflags & CF_PCREL)) {
        tb->pc = pc;
    }
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb_set_page_addr0(tb, phys_pc);
    tb_set_page_addr1(tb, -1);
    if (phys_pc != -1) {
        tb_lock_page0(phys_pc);
    }

    tcg_ctx->gen_tb = tb;
    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
    tcg_ctx->page_bits = TARGET_PAGE_BITS;
    tcg_ctx->page_mask = TARGET_PAGE_MASK;
    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
    tcg_ctx->guest_mo = cpu->cc->tcg_ops->guest_default_memory_order;

 restart_translate:
    trace_translate_block(tb, pc, tb->tc.ptr);

    gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do tcg_ops->translate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            tb_unlock_pages(tb);
            tcg_ctx->gen_tb = NULL;
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);

            /*
             * The half-sized TB may not cross pages.
             * TODO: Fix all targets that cross pages except with
             * the first insn, at which point this can't be reached.
             */
            phys_p2 = tb_page_addr1(tb);
            if (unlikely(phys_p2 != -1)) {
                tb_unlock_page1(phys_pc, phys_p2);
                tb_set_page_addr1(tb, -1);
            }
            goto restart_translate;

        case -3:
            /*
             * We had a page lock ordering problem.  In order to avoid
             * deadlock we had to drop the lock on page0, which means
             * that everything we translated so far is compromised.
             * Restart with locks held on both pages.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with re-locked pages");
            goto restart_translate;

        default:
            g_assert_not_reached();
        }
    }
    tcg_ctx->gen_tb = NULL;

    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        tb_unlock_pages(tb);
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

    /*
     * For CF_PCREL, attribute all executions of the generated code
     * to its first mapping.
     */
    perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    "  -- guest addr 0x%016" PRIx64 " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction. The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, "  -- guest addr 0x%016" PRIx64 "\n",
                            tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, "  -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, "  data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

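    /*
     * Advance the code generation pointer past both the generated code and
     * the search data, keeping the next allocation aligned to CODE_GEN_ALIGN.
     */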
    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT.  Otherwise, rewinding that happens within the TB might
     * fail to look it up using the host PC.
     */
    tcg_tb_insert(tb);

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB.
     *
     * Such TBs must be added to region trees in order to make sure that
     * restore_state_to_opc() - which on some architectures is not limited to
     * rewinding, but also affects exception handling! - is called when such a
     * TB causes an exception.
     *
     * At the same time, temporary one-insn TBs must be executed at most once,
     * because subsequent reads from, e.g., I/O memory may return different
     * values. So return early before attempting to link to other TBs or add
     * to the QHT.
     */
    if (tb_page_addr0(tb) == -1) {
        assert_no_pages_locked();
        return tb;
    }

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb);
    assert_no_pages_locked();

    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

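        /*
         * Roll code_gen_ptr back to the start of the discarded translation,
         * including the TranslationBlock structure that tcg_tb_alloc()
         * placed (icache-line aligned) just before the code buffer.
         */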
        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu_env(cpu);
        vaddr pc;
        uint64_t cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(cpu, addr, addr);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = cpu->cc;
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns.  We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction. Also don't let an IRQ sneak
     * in before we execute it.
     */
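    /*
     * N occupies the CF_COUNT_MASK bits of cflags, so the re-translated
     * TB is limited to that many (one or two) insns.
     */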
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_NOIRQ | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        vaddr pc = cpu->cc->get_pc(cpu);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}

#endif /* CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared to allocate the jump cache.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    /* During early initialization, the cache may not yet be allocated. */
    if (unlikely(jc == NULL)) {
        return;
    }

    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        qatomic_set(&jc->array[i].tb, NULL);
    }
}
679