/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/madvise.h"
#include "qemu/mprotect.h"
#include "qemu/memalign.h"
#include "qemu/cacheinfo.h"
#include "qemu/qtree.h"
#include "qapi/error.h"
#include "tcg/tcg.h"
#include "exec/translation-block.h"
#include "tcg-internal.h"
#include "host/cpuinfo.h"


/*
 * Local source-level compatibility with Unix.
 * Used by tcg_region_init below.
 */
#if defined(_WIN32)
#define PROT_READ 1
#define PROT_WRITE 2
#define PROT_EXEC 4
#endif

struct tcg_region_tree {
    QemuMutex lock;
    QTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size;       /* size of one region */
    size_t stride;     /* .size + guard size */
    size_t total_size; /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current;       /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;
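
/*
 * An illustrative layout of the buffer (sizes and region count are
 * hypothetical; '#' marks a guard page, and the final region absorbs
 * any pages left over from rounding):
 *
 *   start_aligned
 *   v
 *   +-------------+-+-------------+-+--------------------+-+
 *   |  region 0   |#|  region 1   |#|  region 2 (+ tail) |#|
 *   +-------------+-+-------------+-+--------------------+-+
 *    ^               <--- stride --->
 *    after_prologue (region 0 shrinks once the prologue is emitted)
 */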

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
    return (size_t)(p - region.start_aligned) <= region.total_size;
}

#ifndef CONFIG_TCG_INTERPRETER
static int host_prot_read_exec(void)
{
#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
    if (cpuinfo & CPUINFO_BTI) {
        return PROT_READ | PROT_EXEC | PROT_BTI;
    }
#endif
    return PROT_READ | PROT_EXEC;
}
#endif

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */
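
/*
 * For example, with hypothetical mappings buf_rw = 0x7f0000000000 and
 * buf_rx = 0x7f8000000000, tcg_splitwx_diff is 0x8000000000: converting
 * a write pointer to its executable alias adds the diff, and converting
 * back subtracts it. When split-wx is disabled, tcg_splitwx_diff is 0
 * and both conversions are no-ops.
 */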

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp, gpointer userdata)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have a key whose .size field is set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
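
/*
 * For instance, a tree node inserted for a TB with { .ptr = P, .size = 0x80 }
 * compares equal to any lookup key { .ptr = pc, .size = 0 } satisfying
 * P <= pc < P + 0x80, so a search by any host PC inside a TB's translated
 * code finds that TB. (The values here are hypothetical.)
 */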

static void tb_destroy(gpointer value)
{
    TranslationBlock *tb = value;
    qemu_spin_destroy(&tb->jmp_lock);
}

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = q_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert. The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
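
/*
 * For example, with a hypothetical stride of 16 MiB, a pointer 40 MiB
 * past start_aligned yields region_idx = 40 / 16 = 2; anything beyond
 * stride * (n - 1) is clamped to the last region, which also owns the
 * rounding tail of the buffer.
 */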

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    q_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    q_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = q_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}
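
/*
 * A typical use, sketched: resolving a faulting host PC (e.g. one taken
 * from a signal handler's context, as noted in tc_ptr_to_region_tree)
 * back to the TB that contains it:
 *
 *     TranslationBlock *tb = tcg_tb_lookup(host_pc);
 *     if (tb == NULL) {
 *         ... host_pc is not inside any translated code ...
 *     }
 */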

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        q_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += q_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        q_tree_ref(rt->tree);
        q_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}
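
/*
 * Continuing the hypothetical 16 MiB stride: a middle region such as
 * region 2 spans [start_aligned + 32 MiB, start_aligned + 32 MiB +
 * region.size); region 0 instead begins at after_prologue, and the
 * last region's end is stretched to start_aligned + total_size to
 * cover the rounding tail.
 */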

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * If we can't, then just allocate one region per vCPU thread.
     */
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
    return MIN(n_regions, max_cpus * 8);
#endif
}
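
/*
 * As a worked example (hypothetical numbers): with a 1 GiB buffer and
 * 8 vCPUs in MTTCG, tb_size / 2 MiB yields 512 candidate regions;
 * 512 > 8, so we return MIN(512, 8 * 8) = 64. With only an 8 MiB
 * buffer, the 4 candidates would not cover 8 vCPUs, so we fall back
 * to one region per vCPU thread and return 8.
 */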

/*
 * Minimum size of the code gen buffer. This number is randomly chosen,
 * but not so small that we can't have a fair number of TB's live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size());
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size());

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size());
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE | PROT_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

    buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    return PROT_READ | PROT_WRITE;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    /* buf_rx is always equal to MAP_FAILED here and does not require cleanup */
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
#endif /* CONFIG_POSIX */
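
/*
 * The memfd scheme above yields two views of a single allocation:
 *
 *                 memfd "tcg-jit"
 *                /               \
 *     buf_rw (PROT_READ|WRITE)   buf_rx (PROT_READ|EXEC)
 *
 * Generated code is written through buf_rw and executed at
 * buf_rx == buf_rw + tcg_splitwx_diff; no mapping is ever writable
 * and executable at the same time.
 */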

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    /*
     * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
     * rejects a permission change from RWX -> NONE when reserving the
     * guard pages later. We can go the other way with the same number
     * of syscalls, so always begin with PROT_NONE.
     */
    prot = PROT_NONE;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_DARWIN
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In system-mode the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in
 * system-mode.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    const size_t page_size = qemu_real_host_page_size();
    size_t region_size;
    int have_prot, need_prot;

    /* Size the buffer. */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
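
    /*
     * For example (hypothetical host): with 16 GiB of RAM, phys_mem / 8
     * is 2 GiB, which the MIN() above reduces to
     * DEFAULT_CODE_GEN_BUFFER_SIZE (typically 1 GiB for 64-bit
     * system-mode builds); the clamps then keep any explicit tb-size
     * request within [MIN_, MAX_]CODE_GEN_BUFFER_SIZE.
     */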

    have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
    assert(have_prot >= 0);

    /* Request large pages for the buffer and the splitwx. */
    qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
    if (tcg_splitwx_diff) {
        qemu_madvise(region.start_aligned + tcg_splitwx_diff,
                     region.total_size, QEMU_MADV_HUGEPAGE);
    }

    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region.n = tcg_n_regions(tb_size, max_cpus);
    region_size = tb_size / region.n;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);
    region.stride = region_size;

    /* Reserve space for guard pages. */
    region.size = region_size - page_size;
    region.total_size -= page_size;

    /*
     * The first region will be smaller than the others, via the prologue,
     * which has yet to be allocated. For now, the first region begins at
     * the page boundary.
     */
    region.after_prologue = region.start_aligned;

    /* init the region struct */
    qemu_mutex_init(&region.lock);
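
    /*
     * Continuing the hypothetical numbers: a 1 GiB buffer split into
     * 64 regions with 4 KiB pages gives stride = 16 MiB, of which
     * region.size = 16 MiB - 4 KiB is usable and the final page is
     * guard; total_size likewise drops by one page to reserve the
     * guard after the last region.
     */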

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur. Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     * Work with the page protections set up with the initial mapping.
     */
    need_prot = PROT_READ | PROT_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
    if (tcg_splitwx_diff == 0) {
        need_prot |= host_prot_read_exec();
    }
#endif
    for (size_t i = 0, n = region.n; i < n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);
        if (have_prot != need_prot) {
            int rc;

            if (need_prot == (PROT_READ | PROT_WRITE | PROT_EXEC)) {
                rc = qemu_mprotect_rwx(start, end - start);
            } else if (need_prot == (PROT_READ | PROT_WRITE)) {
                rc = qemu_mprotect_rw(start, end - start);
            } else {
#ifdef CONFIG_POSIX
                rc = mprotect(start, end - start, need_prot);
#else
                g_assert_not_reached();
#endif
            }
            if (rc) {
                error_setg_errno(&error_fatal, errno,
                                 "mprotect of jit buffer");
            }
        }
        if (have_prot != 0) {
            /* Guard pages are nice for bug detection but are not essential. */
            (void)qemu_mprotect_none(end, page_size);
        }
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}

void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region. */
    g_assert(region.start_aligned == s->code_gen_buffer);
    region.after_prologue = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
                     region.start_aligned + region.total_size -
                     region.after_prologue);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.total_size;
    capacity -= (region.n - 1) * guard_size;
    capacity -= region.n * TCG_HIGHWATER;

    return capacity;
}
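
/*
 * With the hypothetical numbers used throughout (1 GiB buffer, 64
 * regions, 4 KiB pages): guard_size is one page, and the final guard
 * was already deducted from total_size at init, so capacity works out
 * to total_size - 63 * 4 KiB - 64 * TCG_HIGHWATER bytes of usable
 * space.
 */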