/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-internal.h"


struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */
    size_t total_size; /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
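/*
 * Illustrative layout (assumed numbers: a 32 MiB buffer, 4 regions,
 * 4 KiB pages).  Each region owns stride = 8 MiB of the buffer, of
 * which the last page is a guard page:
 *
 *   start_aligned
 *   |<prologue>|<- region 0 ->|G|<-- region 1 -->|G| ... |<- region 3 ->|G|
 *
 * Region 0 is shortened at the front by the prologue (after_prologue);
 * the final region absorbs any pages left over from rounding the region
 * size down to the page size.
 */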
/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
    return (size_t)(p - region.start_aligned) <= region.total_size;
}
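/*
 * The single unsigned comparison above covers both bounds: if @p is
 * below start_aligned, the subtraction wraps to a huge size_t value and
 * fails the <= test.  Illustrative (assumed addresses): with the buffer
 * at 0x7f0000000000 and total_size 0x2000000, 0x7f0002000000 is accepted
 * (one past the end) while 0x7effffffffff is rejected.
 */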
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * A lookup key has its .size field set to 0, but it may arrive as
     * either argument.  From the glib sources we see that @ap is always
     * the lookup key; however the docs provide no guarantee, so we just
     * mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
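/*
 * With this comparator, g_tree_lookup() performs an interval search:
 * a key with .size == 0 matches the TB whose [tc.ptr, tc.ptr + tc.size)
 * range contains the key's pointer.  Sketch (illustrative values):
 *
 *     struct tb_tc key = { .ptr = (void *)host_pc, .size = 0 };
 *     tb = g_tree_lookup(rt->tree, &key);
 *
 * finds a TB at tc.ptr == 0x1000 with tc.size == 0x80 for any host_pc
 * in [0x1000, 0x1080).
 */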
static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert.  The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}
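/*
 * Illustrative index computation (assumed geometry): with
 * region.stride == 8 MiB and 4 regions, a host pc at
 * region.start_aligned + 20 MiB selects
 *
 *     region_idx = 20 MiB / 8 MiB = 2
 *
 * and therefore the tree at region_trees + 2 * tree_size.  An rx
 * pointer is first translated back to rw by subtracting
 * tcg_splitwx_diff before the arithmetic is applied.
 */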
/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}
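/*
 * The lock_all/unlock_all helpers above take every per-region lock in
 * ascending index order, so two concurrent whole-cache walks such as
 * tcg_tb_foreach() cannot deadlock against each other; the
 * insert/remove/lookup paths only ever hold a single tree lock at a
 * time.  Splitting the cache into one tree (and one lock) per region
 * also keeps contention low, since each vCPU thread mostly touches the
 * tree of the region it is allocating from.
 */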
static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
{
    TranslationBlock *tb = v;

    tb_destroy(tb);
    return FALSE;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}
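/*
 * Each context translates into its region until code_gen_ptr crosses
 * code_gen_highwater (TCG_HIGHWATER bytes below the region end, slack
 * that lets the translator finish a TB already in progress); it then
 * requests a fresh region via tcg_region_alloc() below.  Once all
 * region.n regions have been handed out, the allocation fails and the
 * caller responds by flushing the entire code cache.
 */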
/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}
static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * If we can't, then just allocate one region per vCPU thread.
     */
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
    return MIN(n_regions, max_cpus * 8);
#endif
}
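/*
 * Worked example (assumed values): with tb_size = 1 GiB and max_cpus = 8,
 * n_regions = 1024 MiB / 2 MiB = 512 > max_cpus, so the result is
 * MIN(512, 8 * 8) = 64 regions of 16 MiB each.  With tb_size = 8 MiB the
 * 2 MiB target yields only 4 regions, so we fall back to one region per
 * vCPU thread: 8 regions of 1 MiB.
 */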
/*
 * Minimum size of the code gen buffer.  This number is arbitrarily chosen,
 * but not so small that we can't have a fair number of TBs live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

#ifdef __mips__
/*
 * In order to use J and JAL within the code_gen_buffer, we require
 * that the buffer not cross a 256MB boundary.
 */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/*
 * We weren't able to allocate a buffer without crossing that boundary,
 * so make do with the larger portion of the buffer that doesn't cross.
 * Returns the new base and size of the buffer in *obuf and *osize.
 */
static inline void split_cross_256mb(void **obuf, size_t *osize,
                                     void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    *obuf = buf1;
    *osize = size1;
}
#endif
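/*
 * Worked example for the MIPS constraint (assumed addresses): a buffer
 * at 0x0ff00000 of size 32 MiB ends at 0x11f00000; start ^ end is
 * 0x1e000000, which has bits set above the low 28, so cross_256mb()
 * reports a crossing.  split_cross_256mb() then cuts at
 * buf2 = 0x10000000 and keeps the larger piece: size1 = 1 MiB below the
 * cut versus size2 = 31 MiB above, so the buffer becomes
 * [0x10000000, 0x11f00000).
 */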
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        split_cross_256mb(&buf, &size, buf, size);
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer. */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had. */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half. */
            split_cross_256mb(&buf2, &size2, buf, size);
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}
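/*
 * All alloc_code_gen_buffer* variants share one convention: return -1
 * with @errp set on failure, otherwise the protection bits actually
 * installed on the new mapping.  tcg_region_init() later compares that
 * value against the protections it needs and mprotects each region only
 * when they differ.
 */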
#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions. */
    if (alloc_code_gen_buffer_anon(size, PROT_NONE,
                                   MAP_PRIVATE | MAP_ANONYMOUS |
                                   MAP_NORESERVE, errp) < 0) {
        return -1;
    }
    /* The size of the mapping may have been adjusted. */
    buf_rx = region.start_aligned;
    size = region.total_size;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    return PROT_READ | PROT_WRITE;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
#endif /* CONFIG_POSIX */
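/*
 * The memfd path above maps the same file pages twice (sketch):
 *
 *          memfd "tcg-jit" pages
 *           /                 \
 *      buf_rw (R/W)       buf_rx (R/X) == buf_rw + tcg_splitwx_diff
 *
 * The translator writes instruction bytes through the rw view and the
 * generated code is executed through the rx view, so no single mapping
 * is ever writable and executable at the same time.
 */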
#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */
static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_TCG_INTERPRETER
    /* The tcg interpreter does not need execute permission. */
    prot = PROT_READ | PROT_WRITE;
#elif defined(CONFIG_DARWIN)
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
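/*
 * @splitwx above is a tri-state: 1 means the user forced split-wx on
 * (failure to set it up is an error), 0 means off, and -1 means
 * default-on: try split-wx, and on failure discard the error and fall
 * back to a single read-write-execute mapping.  On QEMU's command line
 * this is driven by the tcg accelerator's split-wx property, e.g.
 * (illustrative) "-accel tcg,split-wx=on".
 */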
/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG.  In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region.  Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice.  Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    const size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    int have_prot, need_prot;

    /* Size the buffer. */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
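    /*
     * Illustrative sizing (assumed host): with 16 GiB of host RAM, the
     * heuristic yields MIN(16 GiB / 8, DEFAULT_CODE_GEN_BUFFER_SIZE);
     * for a 64-bit softmmu build that is the 1 GiB default.  With 2 GiB
     * of RAM the buffer shrinks to 256 MiB.  An explicit tb-size from
     * the command line bypasses the heuristic and is only clamped to
     * the [MIN_CODE_GEN_BUFFER_SIZE, MAX_CODE_GEN_BUFFER_SIZE] range.
     */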
    have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
    assert(have_prot >= 0);

    /* Request large pages for the buffer and the splitwx. */
    qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
    if (tcg_splitwx_diff) {
        qemu_madvise(region.start_aligned + tcg_splitwx_diff,
                     region.total_size, QEMU_MADV_HUGEPAGE);
    }

    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region.n = tcg_n_regions(tb_size, max_cpus);
    region_size = tb_size / region.n;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);
    region.stride = region_size;

    /* Reserve space for guard pages. */
    region.size = region_size - page_size;
    region.total_size -= page_size;

    /*
     * The first region will be smaller than the others, via the prologue,
     * which has yet to be allocated.  For now, the first region begins at
     * the page boundary.
     */
    region.after_prologue = region.start_aligned;

    /* init the region struct */
    qemu_mutex_init(&region.lock);

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur.  Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     * Work with the page protections set up with the initial mapping.
     */
    need_prot = PAGE_READ | PAGE_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
    if (tcg_splitwx_diff == 0) {
        need_prot |= PAGE_EXEC;
    }
#endif
    for (size_t i = 0, n = region.n; i < n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);
        if (have_prot != need_prot) {
            int rc;

            if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
                rc = qemu_mprotect_rwx(start, end - start);
            } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
                rc = qemu_mprotect_rw(start, end - start);
            } else {
                g_assert_not_reached();
            }
            if (rc) {
                error_setg_errno(&error_fatal, errno,
                                 "mprotect of jit buffer");
            }
        }
        if (have_prot != 0) {
            /*
             * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
             * rejects a permission change from RWX -> NONE.  Guard pages are
             * nice for bug detection but are not essential; ignore any failure.
             */
            (void)qemu_mprotect_none(end, page_size);
        }
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}
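/*
 * Example of the protection fixup above (assumed configuration): the
 * static user-mode buffer is mapped read-write, so have_prot is R|W;
 * without split-wx and outside the interpreter, need_prot is R|W|X, and
 * each region is switched to rwx via qemu_mprotect_rwx().  When the
 * memfd split-wx path was used, both values are already R|W and only
 * the guard pages need changing.
 */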
void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region. */
    g_assert(region.start_aligned == s->code_gen_buffer);
    region.after_prologue = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
                     region.start_aligned + region.total_size -
                     region.after_prologue);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.total_size;
    capacity -= (region.n - 1) * guard_size;
    capacity -= region.n * TCG_HIGHWATER;

    return capacity;
}
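/*
 * Worked capacity example (assumed numbers, taking TCG_HIGHWATER to be
 * 1 KiB): a 32 MiB buffer split into 4 regions with 4 KiB pages has
 * total_size = 32 MiB - 4 KiB after the final guard page is reserved,
 * so capacity = total_size - 3 interior guard pages - 4 * 1 KiB of
 * highwater slack, i.e. 32 MiB - 20 KiB of usable code space.
 */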
size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);

        total += qatomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}