/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "disas/disas.h"
#include "tcg/tcg.h"
#include "exec/mmap-lock.h"
#include "tb-internal.h"
#include "exec/tb-flush.h"
#include "qemu/cacheinfo.h"
#include "qemu/target-info.h"
#include "exec/log.h"
#include "exec/icount.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"

TBContext tb_ctx;

/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
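/*
 * Worked examples (hand-traced through the loop below): 64 needs a
 * continuation byte because bit 6 of the low group is set, so it is
 * emitted as { 0xc0, 0x00 }; -2 fits in one group and is emitted as
 * { 0x7e }.
 */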
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/*
 * Decode a signed leb128 sequence at *PP; increment *PP past the
 * decoded value.  Return the decoded value.
 */
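/*
 * Exact inverse of encode_sleb128() above: e.g. { 0xc0, 0x00 } decodes
 * back to 64 and { 0x7e } to -2.
 */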
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < 64 && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of INSN_START_WORDS uint64_t's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
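/* As a concrete illustration (assuming INSN_START_WORDS == 3), a TB with
   two guest insns is stored as eight sleb128 values: the first row holds
   each insn_start word of insn 0 minus its seed, then insn 0's host end
   offset; the second row holds the same four columns as deltas from the
   first row.  */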

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint64_t *insn_data = tcg_ctx->gen_insn_data;
    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        uint64_t prev, curr;

        for (j = 0; j < INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
            } else {
                prev = insn_data[(i - 1) * INSN_START_WORDS + j];
            }
            curr = insn_data[i * INSN_START_WORDS + j];
            p = encode_sleb128(p, curr - prev);
        }
        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
        curr = insn_end_off[i];
        p = encode_sleb128(p, curr - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

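/*
 * Find the guest insn within TB whose generated code contains HOST_PC,
 * and fill DATA with that insn's start words.  Return the number of
 * insns from that point to the end of the TB (the caller uses this to
 * wind back icount), or -1 if HOST_PC does not fall within this TB's
 * generated code.
 */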
static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}

/*
 * The cpu state corresponding to 'host_pc' is restored in
 * preparation for exiting the TB.
 */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc)
{
    uint64_t data[INSN_START_WORDS];
    int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

    if (insns_left < 0) {
        return;
    }

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu->neg.icount_decr.u16.low += insns_left;
    }

    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}

bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}

void page_init(void)
{
    page_table_config_init();
}

/*
 * Isolate the portion of code gen which can setjmp/longjmp.
 * Return the size of the generated code, or negative on error.
 */
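/*
 * The negative values are distinguished by the caller, tb_gen_code():
 * -1 for code_gen_buffer overflow, -2 when the generated code is too
 * large for one TB, and -3 for a page lock ordering problem.
 */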
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
                           vaddr pc, void *host_pc,
                           int *max_insns, int64_t *ti)
{
    int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(ret != 0)) {
        return ret;
    }

    tcg_func_start(tcg_ctx);

    CPUState *cs = env_cpu(env);
    tcg_ctx->cpu = cs;
    cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);

    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    *max_insns = tb->icount;

    return tcg_gen_code(tcg_ctx, tb, pc);
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_p2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
    int64_t ti;
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, s.pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        s.cflags = (s.cflags & ~CF_COUNT_MASK) | 1;
    }

    max_insns = s.cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    assert_no_pages_locked();
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    if (!(s.cflags & CF_PCREL)) {
        tb->pc = s.pc;
    }
    tb->cs_base = s.cs_base;
    tb->flags = s.flags;
    tb->cflags = s.cflags;
    tb_set_page_addr0(tb, phys_pc);
    tb_set_page_addr1(tb, -1);
    if (phys_pc != -1) {
        tb_lock_page0(phys_pc);
    }

    tcg_ctx->gen_tb = tb;
    tcg_ctx->addr_type = target_long_bits() == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
    tcg_ctx->guest_mo = cpu->cc->tcg_ops->guest_default_memory_order;

 restart_translate:
    trace_translate_block(tb, s.pc, tb->tc.ptr);

    gen_code_size = setjmp_gen_code(env, tb, s.pc, host_pc, &max_insns, &ti);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do tcg_ops->translate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            tb_unlock_pages(tb);
            tcg_ctx->gen_tb = NULL;
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);

            /*
             * The half-sized TB may not cross pages.
             * TODO: Fix all targets that cross pages except with
             * the first insn, at which point this can't be reached.
             */
            phys_p2 = tb_page_addr1(tb);
            if (unlikely(phys_p2 != -1)) {
                tb_unlock_page1(phys_pc, phys_p2);
                tb_set_page_addr1(tb, -1);
            }
            goto restart_translate;

        case -3:
            /*
             * We had a page lock ordering problem.  In order to avoid
             * deadlock we had to drop the lock on page0, which means
             * that everything we translated so far is compromised.
             * Restart with locks held on both pages.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with re-locked pages");
            goto restart_translate;

        default:
            g_assert_not_reached();
        }
    }
    tcg_ctx->gen_tb = NULL;

    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        tb_unlock_pages(tb);
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

    /*
     * For CF_PCREL, attribute all executions of the generated code
     * to its first mapping.
     */
    perf_report_code(s.pc, tb, tcg_splitwx_to_rx(gen_code_buf));

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(s.pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    "  -- guest addr 0x%016" PRIx64 " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction. The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, "  -- guest addr 0x%016" PRIx64 "\n",
                            tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, "  -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, "  data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT.  Otherwise a rewind happening inside this TB might
     * fail to look the TB up via its host PC.
     */
    tcg_tb_insert(tb);

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB.
     *
     * Such TBs must be added to region trees in order to make sure that
     * restore_state_to_opc() - which on some architectures is not limited to
     * rewinding, but also affects exception handling! - is called when such a
     * TB causes an exception.
     *
     * At the same time, temporary one-insn TBs must be executed at most once,
     * because subsequent reads from, e.g., I/O memory may return different
     * values. So return early before attempting to link to other TBs or add
     * to the QHT.
     */
    if (tb_page_addr0(tb) == -1) {
        assert_no_pages_locked();
        return tb;
    }

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb);
    assert_no_pages_locked();

    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}
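/*
 * Illustrative sketch only, not part of this file: in the execution loop
 * (accel/tcg/cpu-exec.c), tb_gen_code() is the slow path taken when the
 * TB lookup misses, roughly:
 *
 *     mmap_lock();                     // user-mode only
 *     tb = some_tb_lookup(cpu, s);     // hypothetical lookup helper
 *     if (tb == NULL) {
 *         tb = tb_gen_code(cpu, s);
 *     }
 *     mmap_unlock();
 */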

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu_env(cpu);
        TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
        tb_page_addr_t addr = get_page_addr_code(env, s.pc);

        if (addr != -1) {
            tb_invalidate_phys_range(cpu, addr, addr);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = cpu->cc;
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns. We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction. Also don't let an IRQ sneak
     * in before we execute it.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_NOIRQ | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        vaddr pc = cpu->cc->get_pc(cpu);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}

#endif /* CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared to allocate the jump cache.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    /* During early initialization, the cache may not yet be allocated. */
    if (unlikely(jc == NULL)) {
        return;
    }

    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        qatomic_set(&jc->array[i].tb, NULL);
    }
}
634