xref: /qemu/tcg/region.c (revision 834361efd9d52947663aa5b297693f8e352bef2a)
/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-internal.h"


struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */
    size_t total_size; /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
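
/*
 * Illustrative layout (hypothetical numbers): with total_size = 64 MiB,
 * n = 4 and 4 KiB guard pages, stride = 16 MiB and size = 16 MiB - 4 KiB:
 *
 *   start_aligned
 *   |<------- size ------->|G|<------- size ------->|G| ... |G|
 *   |<------ stride ------>|
 *
 * Each G is an inaccessible guard page; any pages left over from rounding
 * are assigned to the last region.
 */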

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
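    /*
     * Note the unsigned comparison: a pointer below start_aligned wraps
     * to a huge size_t value, so this single compare checks both bounds.
     */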
    return (size_t)(p - region.start_aligned) <= region.total_size;
}

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp, gpointer userdata)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * In a lookup, the search key's .size field is set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static void tb_destroy(gpointer value)
{
    TranslationBlock *tb = value;
    qemu_spin_destroy(&tb->jmp_lock);
}
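
/*
 * tb_destroy() is registered below as the GDestroyNotify for tree values,
 * so glib invokes it whenever a TB is dropped from a tree, whether via
 * g_tree_remove() or by destroying the tree.
 */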

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert.  The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
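        /*
         * g_tree_destroy() removes all nodes and drops one reference;
         * the reference taken here keeps the now-empty tree itself alive.
         */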
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
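    /*
     * The high-water mark leaves slack well beyond what generating any
     * one opcode should require; crossing it prompts allocation of a
     * new region (see tcg_region_alloc).
     */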
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MiB.
     * If we can't, then just allocate one region per vCPU thread.
     */
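    /*
     * For example (illustrative numbers): tb_size = 1 GiB and max_cpus = 8
     * give n_regions = 512, which is then clamped to 8 * 8 = 64.
     */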
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
    return MIN(n_regions, max_cpus * 8);
#endif
}

/*
 * Minimum size of the code gen buffer.  This number is randomly chosen,
 * but not so small that we can't have a fair number of TB's live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32-bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

#ifdef __mips__
/*
 * In order to use J and JAL within the code_gen_buffer, we require
 * that the buffer not cross a 256MB boundary.
 */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
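
/*
 * Illustrative example: addr = 0x0ff00000 with size = 0x00200000 ends at
 * 0x10100000, on the far side of the 256MB boundary at 0x10000000; the
 * XOR of the two ends then has bits set above bit 27, so the test fires.
 */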

/*
 * We weren't able to allocate a buffer without crossing that boundary,
 * so make do with the larger portion of the buffer that doesn't cross.
 * Returns the new base and size of the buffer in *obuf and *osize.
 */
static inline void split_cross_256mb(void **obuf, size_t *osize,
                                     void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    *obuf = buf1;
    *osize = size1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        split_cross_256mb(&buf, &size, buf, size);
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            split_cross_256mb(&buf2, &size2, buf, size);
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions. */
    if (alloc_code_gen_buffer_anon(size, PROT_NONE,
                                   MAP_PRIVATE | MAP_ANONYMOUS |
                                   MAP_NORESERVE, errp) < 0) {
        return -1;
    }
    /* The size of the mapping may have been adjusted. */
    buf_rx = region.start_aligned;
    size = region.total_size;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
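    /*
     * Map the same memfd a second time, read/exec; the two mappings
     * share pages, so this is the executable alias of the RW buffer.
     */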
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;
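    /* Thereafter, rx == rw + tcg_splitwx_diff; see tcg_splitwx_to_rx. */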

    return PROT_READ | PROT_WRITE;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    /*
     * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
     * rejects a permission change from RWX -> NONE when reserving the
     * guard pages later.  We can go the other way with the same number
     * of syscalls, so always begin with PROT_NONE.
     */
    prot = PROT_NONE;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_DARWIN
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region.  Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    const size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    int have_prot, need_prot;

    /* Size the buffer.  */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }

    have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
    assert(have_prot >= 0);

    /* Request large pages for the buffer and the splitwx.  */
    qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
    if (tcg_splitwx_diff) {
        qemu_madvise(region.start_aligned + tcg_splitwx_diff,
                     region.total_size, QEMU_MADV_HUGEPAGE);
    }

    /*
     * Make region_size a multiple of page_size, using start_aligned as the
     * start.  As a result of this we might end up with a few extra pages at
     * the end of the buffer; we will assign those to the last region.
     */
    region.n = tcg_n_regions(tb_size, max_cpus);
    region_size = tb_size / region.n;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);
    region.stride = region_size;

    /* Reserve space for guard pages. */
    region.size = region_size - page_size;
    region.total_size -= page_size;
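
    /*
     * Worked example (illustrative numbers): tb_size = 1 GiB, region.n = 64
     * and 4 KiB pages give stride = 16 MiB and size = 16 MiB - 4 KiB, with
     * the last page of each stride reserved for the guard.
     */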

    /*
     * The first region will be smaller than the others, via the prologue,
     * which has yet to be allocated.  For now, the first region begins at
     * the page boundary.
     */
    region.after_prologue = region.start_aligned;

    /* init the region struct */
    qemu_mutex_init(&region.lock);

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur.  Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     * Work with the page protections set up with the initial mapping.
     */
    need_prot = PAGE_READ | PAGE_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
    if (tcg_splitwx_diff == 0) {
        need_prot |= PAGE_EXEC;
    }
#endif
    for (size_t i = 0, n = region.n; i < n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);
        if (have_prot != need_prot) {
            int rc;

            if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
                rc = qemu_mprotect_rwx(start, end - start);
            } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
                rc = qemu_mprotect_rw(start, end - start);
            } else {
                g_assert_not_reached();
            }
            if (rc) {
                error_setg_errno(&error_fatal, errno,
                                 "mprotect of jit buffer");
            }
        }
        if (have_prot != 0) {
            /* Guard pages are nice for bug detection but are not essential. */
            (void)qemu_mprotect_none(end, page_size);
        }
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}

void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region.  */
    g_assert(region.start_aligned == s->code_gen_buffer);
    region.after_prologue = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
                     region.start_aligned + region.total_size -
                     region.after_prologue);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
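    /*
     * region.total_size already excludes the final guard page (see
     * tcg_region_init), so only the n - 1 interior guards are subtracted;
     * each region also reserves TCG_HIGHWATER bytes of slack.
     */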
    capacity = region.total_size;
    capacity -= (region.n - 1) * guard_size;
    capacity -= region.n * TCG_HIGHWATER;

    return capacity;
}

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);

        total += qatomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}
997