/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "tcg-internal.h"


struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
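/*
 * Illustrative layout (not to scale; the actual numbers are computed in
 * tcg_region_init below):
 *
 *   start                                                           end
 *   |<--- region 0 --->|guard|<--- region 1 --->|guard| ... |guard|
 *   ^      ^
 *   |      start_aligned (first page boundary; region 0 absorbs the
 *   |                     unaligned prefix, the last region any tail)
 *   buffer returned by alloc_code_gen_buffer()
 *
 * Each region is followed by a one-page guard; region.stride is the
 * distance between region starts, i.e. region.size plus the guard page.
 */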
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * In a lookup, exactly one of the two operands (the key) has its
     * .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert. The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}
/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

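/*
 * GTraverseFunc callback, used by tcg_region_tree_reset_all below with
 * all region tree locks held: destroy each TB as the tree is cleared.
 */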
static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
{
    TranslationBlock *tb = v;

    tb_destroy(tb);
    return FALSE;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}
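/*
 * Note: tcg_region_assign above places code_gen_highwater TCG_HIGHWATER
 * bytes below the region's end; the intent (see the overflow checks in
 * tcg.c) is to leave enough slack for translation to finish the block in
 * progress before noticing that the region is full.
 */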
static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
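/*
 * Worked example (illustrative numbers): with max_cpus = 8 and a 1 GiB
 * buffer, the first loop iteration below tries 8 regions per thread:
 * 1 GiB / (8 * 8) = 16 MiB >= 2 MiB, so we settle on 64 regions.
 */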
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
#if !defined(CONFIG_USER_ONLY)
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
#endif
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif

/*
 * Minimum size of the code gen buffer. This number is arbitrarily chosen,
 * but not so small that we can't have a fair number of TBs live.
 */
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
/*
 * Maximum size of the code gen buffer we'd like to use. Unless otherwise
 * indicated, this is constrained by the range of direct branches on the
 * host cpu, as used by the TCG implementation of goto_tb.
 */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop. */
# define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
#elif defined(__mips__)
  /*
   * We have a 256MB branch region, but leave room to make sure the
   * main executable is also within that region.
   */
# define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
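/*
 * Sizing example (illustrative): on a 64-bit system-mode host with 16 GiB
 * of RAM and no tb-size option, size_code_gen_buffer() below yields
 * MIN(1 GiB, 16 GiB / 8) = 1 GiB.
 */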
static size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer. */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/*
 * In order to use J and JAL within the code_gen_buffer, we require
 * that the buffer not cross a 256MB boundary.
 */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
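/*
 * The XOR above is nonzero iff addr and addr + size differ in some bit
 * at or above bit 28, i.e. iff they fall in different 256MB windows.
 * E.g. addr = 0x0ff00000, size = 0x00200000: 0x0ff00000 ^ 0x10100000
 * = 0x1fe00000, which masked is 0x10000000 != 0 -- the buffer crosses.
 */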
/*
 * We weren't able to allocate a buffer without crossing that boundary,
 * so make do with the larger portion of the buffer that doesn't cross.
 * Returns the new base of the buffer, and adjusts code_gen_buffer_size.
 */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif
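/*
 * Three mutually exclusive implementations of alloc_code_gen_buffer()
 * follow: a static in-binary buffer (32-bit user mode), VirtualAlloc
 * (Windows), and mmap with optional split-wx variants (everything else).
 */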
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return false;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        error_setg_errno(errp, errno, "mprotect of jit buffer");
        return false;
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    tcg_ctx->code_gen_buffer = buf;
    return true;
}
#elif defined(_WIN32)
static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return false;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return false;
    }

    tcg_ctx->code_gen_buffer = buf;
    tcg_ctx->code_gen_buffer_size = size;
    return true;
}
#else
static bool alloc_code_gen_buffer_anon(size_t size, int prot,
                                       int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return false;
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success! Use the new buffer. */
                munmap(buf, size);
                break;
            }
            /* Failure. Work with what we had. */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer. Free the smaller half. */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer. */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    tcg_ctx->code_gen_buffer = buf;
    return true;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

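/*
 * Split-wx: back the JIT buffer with a memfd and map it twice, once
 * read-write for the translator and once read-execute for execution.
 * tcg_splitwx_diff records the constant offset between the two views.
 */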
static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions. */
    if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
                                    MAP_PRIVATE | MAP_ANONYMOUS |
                                    MAP_NORESERVE, errp)) {
        return false;
    }
    /* The size of the mapping may have been adjusted. */
    size = tcg_ctx->code_gen_buffer_size;
    buf_rx = tcg_ctx->code_gen_buffer;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    tcg_ctx->code_gen_buffer = buf_rw;
    tcg_ctx->code_gen_buffer_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    /* Request large pages for the buffer and the splitwx. */
    qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
    return true;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return false;
}
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);
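/*
 * Darwin equivalent of the memfd scheme: mach_vm_remap() creates a second
 * mapping of the same anonymous memory, which is then switched to RX.
 * The prototype is declared by hand above, presumably because it is not
 * exposed by the SDK headers for all Apple targets.
 */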
static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
        return false;
    }

    buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return false;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return false;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return true;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return false;
}

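/* @splitwx: 1 = force split-wx on, 0 = off, -1 = use it if available. */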
static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        if (alloc_code_gen_buffer_splitwx(size, errp)) {
            return true;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return false;
        }
        error_free_or_abort(errp);
    }

    prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_TCG_INTERPRETER
    /* The tcg interpreter does not need execute permission. */
    prot = PROT_READ | PROT_WRITE;
#elif defined(CONFIG_DARWIN)
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(size_t tb_size, int splitwx)
{
    void *buf, *aligned;
    size_t size;
    size_t page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;
    bool ok;

    ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
                               splitwx, &error_fatal);
    assert(ok);

    buf = tcg_init_ctx.code_gen_buffer;
    size = tcg_init_ctx.code_gen_buffer_size;
    page_size = qemu_real_host_page_size;
    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;
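    /*
     * Example (illustrative): with 4 KiB pages and region_size = 2 MiB,
     * each region offers 2 MiB - 4 KiB of code space, and consecutive
     * regions start region.stride = 2 MiB apart, the difference being
     * the trailing guard page.
     */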

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur. Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     */
    for (i = 0; i < region.n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);

        /*
         * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
         * rejects a permission change from RWX -> NONE. Guard pages are
         * nice for bug detection but are not essential; ignore any failure.
         */
        (void)qemu_mprotect_none(end, page_size);
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}

void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region. */
    g_assert(region.start == s->code_gen_buffer);
    region.start = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.start),
                     region.end - region.start);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);

        total += qatomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}