154936004Sbellard /* 2fd6ce8f6Sbellard * virtual page mapping and translated block handling 354936004Sbellard * 454936004Sbellard * Copyright (c) 2003 Fabrice Bellard 554936004Sbellard * 654936004Sbellard * This library is free software; you can redistribute it and/or 754936004Sbellard * modify it under the terms of the GNU Lesser General Public 854936004Sbellard * License as published by the Free Software Foundation; either 954936004Sbellard * version 2 of the License, or (at your option) any later version. 1054936004Sbellard * 1154936004Sbellard * This library is distributed in the hope that it will be useful, 1254936004Sbellard * but WITHOUT ANY WARRANTY; without even the implied warranty of 1354936004Sbellard * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 1454936004Sbellard * Lesser General Public License for more details. 1554936004Sbellard * 1654936004Sbellard * You should have received a copy of the GNU Lesser General Public 178167ee88SBlue Swirl * License along with this library; if not, see <http://www.gnu.org/licenses/>. 1854936004Sbellard */ 1967b915a5Sbellard #include "config.h" 20d5a8f07cSbellard #ifdef _WIN32 21d5a8f07cSbellard #include <windows.h> 22d5a8f07cSbellard #else 23a98d49b1Sbellard #include <sys/types.h> 24d5a8f07cSbellard #include <sys/mman.h> 25d5a8f07cSbellard #endif 2654936004Sbellard 27055403b2SStefan Weil #include "qemu-common.h" 286180a181Sbellard #include "cpu.h" 29b67d9a52Sbellard #include "tcg.h" 30b3c7724cSpbrook #include "hw/hw.h" 31cc9e98cbSAlex Williamson #include "hw/qdev.h" 3274576198Saliguori #include "osdep.h" 337ba1e619Saliguori #include "kvm.h" 34432d268cSJun Nakajima #include "hw/xen.h" 3529e922b6SBlue Swirl #include "qemu-timer.h" 3662152b8aSAvi Kivity #include "memory.h" 3762152b8aSAvi Kivity #include "exec-memory.h" 3853a5960aSpbrook #if defined(CONFIG_USER_ONLY) 3953a5960aSpbrook #include <qemu.h> 40f01576f1SJuergen Lock #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 41f01576f1SJuergen Lock #include <sys/param.h> 42f01576f1SJuergen Lock #if __FreeBSD_version >= 700104 43f01576f1SJuergen Lock #define HAVE_KINFO_GETVMMAP 44f01576f1SJuergen Lock #define sigqueue sigqueue_freebsd /* avoid redefinition */ 45f01576f1SJuergen Lock #include <sys/time.h> 46f01576f1SJuergen Lock #include <sys/proc.h> 47f01576f1SJuergen Lock #include <machine/profile.h> 48f01576f1SJuergen Lock #define _KERNEL 49f01576f1SJuergen Lock #include <sys/user.h> 50f01576f1SJuergen Lock #undef _KERNEL 51f01576f1SJuergen Lock #undef sigqueue 52f01576f1SJuergen Lock #include <libutil.h> 53f01576f1SJuergen Lock #endif 54f01576f1SJuergen Lock #endif 55432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */ 56432d268cSJun Nakajima #include "xen-mapcache.h" 576506e4f9SStefano Stabellini #include "trace.h" 5853a5960aSpbrook #endif 5954936004Sbellard 60fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE 6166e85a21Sbellard //#define DEBUG_FLUSH 629fa3e853Sbellard //#define DEBUG_TLB 6367d3b957Spbrook //#define DEBUG_UNASSIGNED 64fd6ce8f6Sbellard 65fd6ce8f6Sbellard /* make various TB consistency checks */ 66fd6ce8f6Sbellard //#define DEBUG_TB_CHECK 6798857888Sbellard //#define DEBUG_TLB_CHECK 68fd6ce8f6Sbellard 691196be37Sths //#define DEBUG_IOPORT 70db7b5426Sblueswir1 //#define DEBUG_SUBPAGE 711196be37Sths 7299773bd4Spbrook #if !defined(CONFIG_USER_ONLY) 7399773bd4Spbrook /* TB consistency checks only implemented for usermode emulation. 
*/ 7499773bd4Spbrook #undef DEBUG_TB_CHECK 7599773bd4Spbrook #endif 7699773bd4Spbrook 779fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10 789fa3e853Sbellard 79bdaf78e0Sblueswir1 static TranslationBlock *tbs; 8024ab68acSStefan Weil static int code_gen_max_blocks; 819fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; 82bdaf78e0Sblueswir1 static int nb_tbs; 83eb51d102Sbellard /* any access to the tbs or the page table must use this lock */ 84c227f099SAnthony Liguori spinlock_t tb_lock = SPIN_LOCK_UNLOCKED; 85fd6ce8f6Sbellard 86141ac468Sblueswir1 #if defined(__arm__) || defined(__sparc_v9__) 87141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64 88141ac468Sblueswir1 have limited branch ranges (possibly also PPC) so place it in a 89d03d860bSblueswir1 section close to code segment. */ 90d03d860bSblueswir1 #define code_gen_section \ 91d03d860bSblueswir1 __attribute__((__section__(".gen_code"))) \ 92d03d860bSblueswir1 __attribute__((aligned (32))) 93f8e2af11SStefan Weil #elif defined(_WIN32) 94f8e2af11SStefan Weil /* Maximum alignment for Win32 is 16. */ 95f8e2af11SStefan Weil #define code_gen_section \ 96f8e2af11SStefan Weil __attribute__((aligned (16))) 97d03d860bSblueswir1 #else 98d03d860bSblueswir1 #define code_gen_section \ 99d03d860bSblueswir1 __attribute__((aligned (32))) 100d03d860bSblueswir1 #endif 101d03d860bSblueswir1 102d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section; 103bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer; 104bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_size; 10526a5f13bSbellard /* threshold to flush the translated code buffer */ 106bdaf78e0Sblueswir1 static unsigned long code_gen_buffer_max_size; 10724ab68acSStefan Weil static uint8_t *code_gen_ptr; 108fd6ce8f6Sbellard 109e2eef170Spbrook #if !defined(CONFIG_USER_ONLY) 1109fa3e853Sbellard int phys_ram_fd; 11174576198Saliguori static int in_migration; 11294a6b54fSpbrook 11385d59fefSPaolo Bonzini RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) }; 11462152b8aSAvi Kivity 11562152b8aSAvi Kivity static MemoryRegion *system_memory; 116309cb471SAvi Kivity static MemoryRegion *system_io; 11762152b8aSAvi Kivity 118e2eef170Spbrook #endif 1199fa3e853Sbellard 1206a00d601Sbellard CPUState *first_cpu; 1216a00d601Sbellard /* current CPU in the current thread. It is only valid inside 1226a00d601Sbellard cpu_exec() */ 123b3c4bbe5SPaolo Bonzini DEFINE_TLS(CPUState *,cpu_single_env); 1242e70f6efSpbrook /* 0 = Do not count executed instructions. 125bf20dc07Sths 1 = Precise instruction counting. 1262e70f6efSpbrook 2 = Adaptive rate instruction counting. */ 1272e70f6efSpbrook int use_icount = 0; 1286a00d601Sbellard 12954936004Sbellard typedef struct PageDesc { 13092e873b9Sbellard /* list of TBs intersecting this ram page */ 131fd6ce8f6Sbellard TranslationBlock *first_tb; 1329fa3e853Sbellard /* in order to optimize self modifying code, we count the number 1339fa3e853Sbellard of lookups we do to a given page to use a bitmap */ 1349fa3e853Sbellard unsigned int code_write_count; 1359fa3e853Sbellard uint8_t *code_bitmap; 1369fa3e853Sbellard #if defined(CONFIG_USER_ONLY) 1379fa3e853Sbellard unsigned long flags; 1389fa3e853Sbellard #endif 13954936004Sbellard } PageDesc; 14054936004Sbellard 14141c1b1c9SPaul Brook /* In system mode we want L1_MAP to be based on ram offsets, 1425cd2c5b6SRichard Henderson while in user mode we want it to be based on virtual addresses. 
*/ 1435cd2c5b6SRichard Henderson #if !defined(CONFIG_USER_ONLY) 14441c1b1c9SPaul Brook #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS 14541c1b1c9SPaul Brook # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS 14641c1b1c9SPaul Brook #else 1475cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS 14841c1b1c9SPaul Brook #endif 149bedb69eaSj_mayer #else 1505cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS 151bedb69eaSj_mayer #endif 15254936004Sbellard 1535cd2c5b6SRichard Henderson /* Size of the L2 (and L3, etc) page tables. */ 1545cd2c5b6SRichard Henderson #define L2_BITS 10 15554936004Sbellard #define L2_SIZE (1 << L2_BITS) 15654936004Sbellard 1575cd2c5b6SRichard Henderson /* The bits remaining after N lower levels of page tables. */ 1585cd2c5b6SRichard Henderson #define P_L1_BITS_REM \ 1595cd2c5b6SRichard Henderson ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) 1605cd2c5b6SRichard Henderson #define V_L1_BITS_REM \ 1615cd2c5b6SRichard Henderson ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) 1625cd2c5b6SRichard Henderson 1635cd2c5b6SRichard Henderson /* Size of the L1 page table. Avoid silly small sizes. */ 1645cd2c5b6SRichard Henderson #if P_L1_BITS_REM < 4 1655cd2c5b6SRichard Henderson #define P_L1_BITS (P_L1_BITS_REM + L2_BITS) 1665cd2c5b6SRichard Henderson #else 1675cd2c5b6SRichard Henderson #define P_L1_BITS P_L1_BITS_REM 1685cd2c5b6SRichard Henderson #endif 1695cd2c5b6SRichard Henderson 1705cd2c5b6SRichard Henderson #if V_L1_BITS_REM < 4 1715cd2c5b6SRichard Henderson #define V_L1_BITS (V_L1_BITS_REM + L2_BITS) 1725cd2c5b6SRichard Henderson #else 1735cd2c5b6SRichard Henderson #define V_L1_BITS V_L1_BITS_REM 1745cd2c5b6SRichard Henderson #endif 1755cd2c5b6SRichard Henderson 1765cd2c5b6SRichard Henderson #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS) 1775cd2c5b6SRichard Henderson #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) 1785cd2c5b6SRichard Henderson 1795cd2c5b6SRichard Henderson #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS) 1805cd2c5b6SRichard Henderson #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) 1815cd2c5b6SRichard Henderson 18283fb7adfSbellard unsigned long qemu_real_host_page_size; 18383fb7adfSbellard unsigned long qemu_host_page_size; 18483fb7adfSbellard unsigned long qemu_host_page_mask; 18554936004Sbellard 1865cd2c5b6SRichard Henderson /* This is a multi-level map on the virtual address space. 1875cd2c5b6SRichard Henderson The bottom level has pointers to PageDesc. */ 1885cd2c5b6SRichard Henderson static void *l1_map[V_L1_SIZE]; 18954936004Sbellard 190e2eef170Spbrook #if !defined(CONFIG_USER_ONLY) 19141c1b1c9SPaul Brook typedef struct PhysPageDesc { 19241c1b1c9SPaul Brook /* offset in host memory of the page + io_index in the low bits */ 19341c1b1c9SPaul Brook ram_addr_t phys_offset; 19441c1b1c9SPaul Brook ram_addr_t region_offset; 19541c1b1c9SPaul Brook } PhysPageDesc; 19641c1b1c9SPaul Brook 1975cd2c5b6SRichard Henderson /* This is a multi-level map on the physical address space. 1985cd2c5b6SRichard Henderson The bottom level has pointers to PhysPageDesc. 
*/ 1995cd2c5b6SRichard Henderson static void *l1_phys_map[P_L1_SIZE]; 2006d9a1304SPaul Brook 201e2eef170Spbrook static void io_mem_init(void); 20262152b8aSAvi Kivity static void memory_map_init(void); 203e2eef170Spbrook 20433417e70Sbellard /* io memory support */ 20533417e70Sbellard CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; 20633417e70Sbellard CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; 207a4193c8aSbellard void *io_mem_opaque[IO_MEM_NB_ENTRIES]; 208511d2b14Sblueswir1 static char io_mem_used[IO_MEM_NB_ENTRIES]; 2096658ffb8Spbrook static int io_mem_watch; 2106658ffb8Spbrook #endif 21133417e70Sbellard 21234865134Sbellard /* log support */ 2131e8b27caSJuha Riihimäki #ifdef WIN32 2141e8b27caSJuha Riihimäki static const char *logfilename = "qemu.log"; 2151e8b27caSJuha Riihimäki #else 216d9b630fdSblueswir1 static const char *logfilename = "/tmp/qemu.log"; 2171e8b27caSJuha Riihimäki #endif 21834865134Sbellard FILE *logfile; 21934865134Sbellard int loglevel; 220e735b91cSpbrook static int log_append = 0; 22134865134Sbellard 222e3db7226Sbellard /* statistics */ 223b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY) 224e3db7226Sbellard static int tlb_flush_count; 225b3755a91SPaul Brook #endif 226e3db7226Sbellard static int tb_flush_count; 227e3db7226Sbellard static int tb_phys_invalidate_count; 228e3db7226Sbellard 2297cb69caeSbellard #ifdef _WIN32 2307cb69caeSbellard static void map_exec(void *addr, long size) 2317cb69caeSbellard { 2327cb69caeSbellard DWORD old_protect; 2337cb69caeSbellard VirtualProtect(addr, size, 2347cb69caeSbellard PAGE_EXECUTE_READWRITE, &old_protect); 2357cb69caeSbellard 2367cb69caeSbellard } 2377cb69caeSbellard #else 2387cb69caeSbellard static void map_exec(void *addr, long size) 2397cb69caeSbellard { 2404369415fSbellard unsigned long start, end, page_size; 2417cb69caeSbellard 2424369415fSbellard page_size = getpagesize(); 2437cb69caeSbellard start = (unsigned long)addr; 2444369415fSbellard start &= ~(page_size - 1); 2457cb69caeSbellard 2467cb69caeSbellard end = (unsigned long)addr + size; 2474369415fSbellard end += page_size - 1; 2484369415fSbellard end &= ~(page_size - 1); 2497cb69caeSbellard 2507cb69caeSbellard mprotect((void *)start, end - start, 2517cb69caeSbellard PROT_READ | PROT_WRITE | PROT_EXEC); 2527cb69caeSbellard } 2537cb69caeSbellard #endif 2547cb69caeSbellard 255b346ff46Sbellard static void page_init(void) 25654936004Sbellard { 25783fb7adfSbellard /* NOTE: we can always suppose that qemu_host_page_size >= 25854936004Sbellard TARGET_PAGE_SIZE */ 259c2b48b69Saliguori #ifdef _WIN32 260c2b48b69Saliguori { 261c2b48b69Saliguori SYSTEM_INFO system_info; 262c2b48b69Saliguori 263c2b48b69Saliguori GetSystemInfo(&system_info); 264c2b48b69Saliguori qemu_real_host_page_size = system_info.dwPageSize; 265c2b48b69Saliguori } 266c2b48b69Saliguori #else 267c2b48b69Saliguori qemu_real_host_page_size = getpagesize(); 268c2b48b69Saliguori #endif 26983fb7adfSbellard if (qemu_host_page_size == 0) 27083fb7adfSbellard qemu_host_page_size = qemu_real_host_page_size; 27183fb7adfSbellard if (qemu_host_page_size < TARGET_PAGE_SIZE) 27283fb7adfSbellard qemu_host_page_size = TARGET_PAGE_SIZE; 27383fb7adfSbellard qemu_host_page_mask = ~(qemu_host_page_size - 1); 27450a9569bSbalrog 2752e9a5713SPaul Brook #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) 27650a9569bSbalrog { 277f01576f1SJuergen Lock #ifdef HAVE_KINFO_GETVMMAP 278f01576f1SJuergen Lock struct kinfo_vmentry *freep; 279f01576f1SJuergen Lock int i, cnt; 280f01576f1SJuergen Lock 281f01576f1SJuergen Lock freep 
= kinfo_getvmmap(getpid(), &cnt); 282f01576f1SJuergen Lock if (freep) { 283f01576f1SJuergen Lock mmap_lock(); 284f01576f1SJuergen Lock for (i = 0; i < cnt; i++) { 285f01576f1SJuergen Lock unsigned long startaddr, endaddr; 286f01576f1SJuergen Lock 287f01576f1SJuergen Lock startaddr = freep[i].kve_start; 288f01576f1SJuergen Lock endaddr = freep[i].kve_end; 289f01576f1SJuergen Lock if (h2g_valid(startaddr)) { 290f01576f1SJuergen Lock startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 291f01576f1SJuergen Lock 292f01576f1SJuergen Lock if (h2g_valid(endaddr)) { 293f01576f1SJuergen Lock endaddr = h2g(endaddr); 294fd436907SAurelien Jarno page_set_flags(startaddr, endaddr, PAGE_RESERVED); 295f01576f1SJuergen Lock } else { 296f01576f1SJuergen Lock #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS 297f01576f1SJuergen Lock endaddr = ~0ul; 298fd436907SAurelien Jarno page_set_flags(startaddr, endaddr, PAGE_RESERVED); 299f01576f1SJuergen Lock #endif 300f01576f1SJuergen Lock } 301f01576f1SJuergen Lock } 302f01576f1SJuergen Lock } 303f01576f1SJuergen Lock free(freep); 304f01576f1SJuergen Lock mmap_unlock(); 305f01576f1SJuergen Lock } 306f01576f1SJuergen Lock #else 30750a9569bSbalrog FILE *f; 30850a9569bSbalrog 3090776590dSpbrook last_brk = (unsigned long)sbrk(0); 3105cd2c5b6SRichard Henderson 311fd436907SAurelien Jarno f = fopen("/compat/linux/proc/self/maps", "r"); 31250a9569bSbalrog if (f) { 3135cd2c5b6SRichard Henderson mmap_lock(); 3145cd2c5b6SRichard Henderson 31550a9569bSbalrog do { 3165cd2c5b6SRichard Henderson unsigned long startaddr, endaddr; 3175cd2c5b6SRichard Henderson int n; 3185cd2c5b6SRichard Henderson 3195cd2c5b6SRichard Henderson n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); 3205cd2c5b6SRichard Henderson 3215cd2c5b6SRichard Henderson if (n == 2 && h2g_valid(startaddr)) { 3225cd2c5b6SRichard Henderson startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 3235cd2c5b6SRichard Henderson 3245cd2c5b6SRichard Henderson if (h2g_valid(endaddr)) { 3255cd2c5b6SRichard Henderson endaddr = h2g(endaddr); 3265cd2c5b6SRichard Henderson } else { 3275cd2c5b6SRichard Henderson endaddr = ~0ul; 3285cd2c5b6SRichard Henderson } 3295cd2c5b6SRichard Henderson page_set_flags(startaddr, endaddr, PAGE_RESERVED); 33050a9569bSbalrog } 33150a9569bSbalrog } while (!feof(f)); 3325cd2c5b6SRichard Henderson 33350a9569bSbalrog fclose(f); 334c8a706feSpbrook mmap_unlock(); 33550a9569bSbalrog } 336f01576f1SJuergen Lock #endif 3375cd2c5b6SRichard Henderson } 33850a9569bSbalrog #endif 33954936004Sbellard } 34054936004Sbellard 34141c1b1c9SPaul Brook static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) 34254936004Sbellard { 34341c1b1c9SPaul Brook PageDesc *pd; 34441c1b1c9SPaul Brook void **lp; 34541c1b1c9SPaul Brook int i; 34641c1b1c9SPaul Brook 34717e2377aSpbrook #if defined(CONFIG_USER_ONLY) 3487267c094SAnthony Liguori /* We can't use g_malloc because it may recurse into a locked mutex. */ 3495cd2c5b6SRichard Henderson # define ALLOC(P, SIZE) \ 3505cd2c5b6SRichard Henderson do { \ 3515cd2c5b6SRichard Henderson P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \ 3525cd2c5b6SRichard Henderson MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \ 3535cd2c5b6SRichard Henderson } while (0) 3545cd2c5b6SRichard Henderson #else 3555cd2c5b6SRichard Henderson # define ALLOC(P, SIZE) \ 3567267c094SAnthony Liguori do { P = g_malloc0(SIZE); } while (0) 3575cd2c5b6SRichard Henderson #endif 3585cd2c5b6SRichard Henderson 3595cd2c5b6SRichard Henderson /* Level 1. Always allocated. 
*/ 3605cd2c5b6SRichard Henderson lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); 3615cd2c5b6SRichard Henderson 3625cd2c5b6SRichard Henderson /* Level 2..N-1. */ 3635cd2c5b6SRichard Henderson for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) { 3645cd2c5b6SRichard Henderson void **p = *lp; 3655cd2c5b6SRichard Henderson 3665cd2c5b6SRichard Henderson if (p == NULL) { 3675cd2c5b6SRichard Henderson if (!alloc) { 3685cd2c5b6SRichard Henderson return NULL; 3695cd2c5b6SRichard Henderson } 3705cd2c5b6SRichard Henderson ALLOC(p, sizeof(void *) * L2_SIZE); 37154936004Sbellard *lp = p; 3725cd2c5b6SRichard Henderson } 3735cd2c5b6SRichard Henderson 3745cd2c5b6SRichard Henderson lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1)); 3755cd2c5b6SRichard Henderson } 3765cd2c5b6SRichard Henderson 3775cd2c5b6SRichard Henderson pd = *lp; 3785cd2c5b6SRichard Henderson if (pd == NULL) { 3795cd2c5b6SRichard Henderson if (!alloc) { 3805cd2c5b6SRichard Henderson return NULL; 3815cd2c5b6SRichard Henderson } 3825cd2c5b6SRichard Henderson ALLOC(pd, sizeof(PageDesc) * L2_SIZE); 3835cd2c5b6SRichard Henderson *lp = pd; 3845cd2c5b6SRichard Henderson } 3855cd2c5b6SRichard Henderson 3865cd2c5b6SRichard Henderson #undef ALLOC 3875cd2c5b6SRichard Henderson 3885cd2c5b6SRichard Henderson return pd + (index & (L2_SIZE - 1)); 38954936004Sbellard } 39054936004Sbellard 39141c1b1c9SPaul Brook static inline PageDesc *page_find(tb_page_addr_t index) 39254936004Sbellard { 3935cd2c5b6SRichard Henderson return page_find_alloc(index, 0); 39454936004Sbellard } 39554936004Sbellard 3966d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY) 397c227f099SAnthony Liguori static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) 39892e873b9Sbellard { 399e3f4e2a4Spbrook PhysPageDesc *pd; 4005cd2c5b6SRichard Henderson void **lp; 401e3f4e2a4Spbrook int i; 4025cd2c5b6SRichard Henderson 4035cd2c5b6SRichard Henderson /* Level 1. Always allocated. */ 4045cd2c5b6SRichard Henderson lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1)); 4055cd2c5b6SRichard Henderson 4065cd2c5b6SRichard Henderson /* Level 2..N-1. 
*/ 4075cd2c5b6SRichard Henderson for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) { 4085cd2c5b6SRichard Henderson void **p = *lp; 4095cd2c5b6SRichard Henderson if (p == NULL) { 4105cd2c5b6SRichard Henderson if (!alloc) { 411108c49b8Sbellard return NULL; 4125cd2c5b6SRichard Henderson } 4137267c094SAnthony Liguori *lp = p = g_malloc0(sizeof(void *) * L2_SIZE); 4145cd2c5b6SRichard Henderson } 4155cd2c5b6SRichard Henderson lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1)); 4165cd2c5b6SRichard Henderson } 4175cd2c5b6SRichard Henderson 4185cd2c5b6SRichard Henderson pd = *lp; 4195cd2c5b6SRichard Henderson if (pd == NULL) { 4205cd2c5b6SRichard Henderson int i; 4215cd2c5b6SRichard Henderson 4225cd2c5b6SRichard Henderson if (!alloc) { 4235cd2c5b6SRichard Henderson return NULL; 4245cd2c5b6SRichard Henderson } 4255cd2c5b6SRichard Henderson 4267267c094SAnthony Liguori *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE); 4275cd2c5b6SRichard Henderson 42867c4d23cSpbrook for (i = 0; i < L2_SIZE; i++) { 429e3f4e2a4Spbrook pd[i].phys_offset = IO_MEM_UNASSIGNED; 43067c4d23cSpbrook pd[i].region_offset = (index + i) << TARGET_PAGE_BITS; 43167c4d23cSpbrook } 43292e873b9Sbellard } 4335cd2c5b6SRichard Henderson 4345cd2c5b6SRichard Henderson return pd + (index & (L2_SIZE - 1)); 43592e873b9Sbellard } 43692e873b9Sbellard 437c227f099SAnthony Liguori static inline PhysPageDesc *phys_page_find(target_phys_addr_t index) 43892e873b9Sbellard { 439108c49b8Sbellard return phys_page_find_alloc(index, 0); 44092e873b9Sbellard } 44192e873b9Sbellard 442c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr); 443c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 4443a7d929eSbellard target_ulong vaddr); 445c8a706feSpbrook #define mmap_lock() do { } while(0) 446c8a706feSpbrook #define mmap_unlock() do { } while(0) 4479fa3e853Sbellard #endif 448fd6ce8f6Sbellard 4494369415fSbellard #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024) 4504369415fSbellard 4514369415fSbellard #if defined(CONFIG_USER_ONLY) 452ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in 4534369415fSbellard user mode. 
It will change when a dedicated libc will be used */ 4544369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER 4554369415fSbellard #endif 4564369415fSbellard 4574369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER 458ebf50fb3SAurelien Jarno static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] 459ebf50fb3SAurelien Jarno __attribute__((aligned (CODE_GEN_ALIGN))); 4604369415fSbellard #endif 4614369415fSbellard 4628fcd3692Sblueswir1 static void code_gen_alloc(unsigned long tb_size) 46326a5f13bSbellard { 4644369415fSbellard #ifdef USE_STATIC_CODE_GEN_BUFFER 4654369415fSbellard code_gen_buffer = static_code_gen_buffer; 4664369415fSbellard code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 4674369415fSbellard map_exec(code_gen_buffer, code_gen_buffer_size); 4684369415fSbellard #else 46926a5f13bSbellard code_gen_buffer_size = tb_size; 47026a5f13bSbellard if (code_gen_buffer_size == 0) { 4714369415fSbellard #if defined(CONFIG_USER_ONLY) 4724369415fSbellard code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 4734369415fSbellard #else 474ccbb4d44SStuart Brady /* XXX: needs adjustments */ 47594a6b54fSpbrook code_gen_buffer_size = (unsigned long)(ram_size / 4); 4764369415fSbellard #endif 47726a5f13bSbellard } 47826a5f13bSbellard if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE) 47926a5f13bSbellard code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE; 48026a5f13bSbellard /* The code gen buffer location may have constraints depending on 48126a5f13bSbellard the host cpu and OS */ 48226a5f13bSbellard #if defined(__linux__) 48326a5f13bSbellard { 48426a5f13bSbellard int flags; 485141ac468Sblueswir1 void *start = NULL; 486141ac468Sblueswir1 48726a5f13bSbellard flags = MAP_PRIVATE | MAP_ANONYMOUS; 48826a5f13bSbellard #if defined(__x86_64__) 48926a5f13bSbellard flags |= MAP_32BIT; 49026a5f13bSbellard /* Cannot map more than that */ 49126a5f13bSbellard if (code_gen_buffer_size > (800 * 1024 * 1024)) 49226a5f13bSbellard code_gen_buffer_size = (800 * 1024 * 1024); 493141ac468Sblueswir1 #elif defined(__sparc_v9__) 494141ac468Sblueswir1 // Map the buffer below 2G, so we can use direct calls and branches 495141ac468Sblueswir1 flags |= MAP_FIXED; 496141ac468Sblueswir1 start = (void *) 0x60000000UL; 497141ac468Sblueswir1 if (code_gen_buffer_size > (512 * 1024 * 1024)) 498141ac468Sblueswir1 code_gen_buffer_size = (512 * 1024 * 1024); 4991cb0661eSbalrog #elif defined(__arm__) 50063d41246Sbalrog /* Map the buffer below 32M, so we can use direct calls and branches */ 5011cb0661eSbalrog flags |= MAP_FIXED; 5021cb0661eSbalrog start = (void *) 0x01000000UL; 5031cb0661eSbalrog if (code_gen_buffer_size > 16 * 1024 * 1024) 5041cb0661eSbalrog code_gen_buffer_size = 16 * 1024 * 1024; 505eba0b893SRichard Henderson #elif defined(__s390x__) 506eba0b893SRichard Henderson /* Map the buffer so that we can use direct calls and branches. */ 507eba0b893SRichard Henderson /* We have a +- 4GB range on the branches; leave some slop. 
*/ 508eba0b893SRichard Henderson if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) { 509eba0b893SRichard Henderson code_gen_buffer_size = 3ul * 1024 * 1024 * 1024; 510eba0b893SRichard Henderson } 511eba0b893SRichard Henderson start = (void *)0x90000000UL; 51226a5f13bSbellard #endif 513141ac468Sblueswir1 code_gen_buffer = mmap(start, code_gen_buffer_size, 51426a5f13bSbellard PROT_WRITE | PROT_READ | PROT_EXEC, 51526a5f13bSbellard flags, -1, 0); 51626a5f13bSbellard if (code_gen_buffer == MAP_FAILED) { 51726a5f13bSbellard fprintf(stderr, "Could not allocate dynamic translator buffer\n"); 51826a5f13bSbellard exit(1); 51926a5f13bSbellard } 52026a5f13bSbellard } 521cbb608a5SBrad #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \ 5229f4b09a4STobias Nygren || defined(__DragonFly__) || defined(__OpenBSD__) \ 5239f4b09a4STobias Nygren || defined(__NetBSD__) 52406e67a82Saliguori { 52506e67a82Saliguori int flags; 52606e67a82Saliguori void *addr = NULL; 52706e67a82Saliguori flags = MAP_PRIVATE | MAP_ANONYMOUS; 52806e67a82Saliguori #if defined(__x86_64__) 52906e67a82Saliguori /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume 53006e67a82Saliguori * 0x40000000 is free */ 53106e67a82Saliguori flags |= MAP_FIXED; 53206e67a82Saliguori addr = (void *)0x40000000; 53306e67a82Saliguori /* Cannot map more than that */ 53406e67a82Saliguori if (code_gen_buffer_size > (800 * 1024 * 1024)) 53506e67a82Saliguori code_gen_buffer_size = (800 * 1024 * 1024); 5364cd31ad2SBlue Swirl #elif defined(__sparc_v9__) 5374cd31ad2SBlue Swirl // Map the buffer below 2G, so we can use direct calls and branches 5384cd31ad2SBlue Swirl flags |= MAP_FIXED; 5394cd31ad2SBlue Swirl addr = (void *) 0x60000000UL; 5404cd31ad2SBlue Swirl if (code_gen_buffer_size > (512 * 1024 * 1024)) { 5414cd31ad2SBlue Swirl code_gen_buffer_size = (512 * 1024 * 1024); 5424cd31ad2SBlue Swirl } 54306e67a82Saliguori #endif 54406e67a82Saliguori code_gen_buffer = mmap(addr, code_gen_buffer_size, 54506e67a82Saliguori PROT_WRITE | PROT_READ | PROT_EXEC, 54606e67a82Saliguori flags, -1, 0); 54706e67a82Saliguori if (code_gen_buffer == MAP_FAILED) { 54806e67a82Saliguori fprintf(stderr, "Could not allocate dynamic translator buffer\n"); 54906e67a82Saliguori exit(1); 55006e67a82Saliguori } 55106e67a82Saliguori } 55226a5f13bSbellard #else 5537267c094SAnthony Liguori code_gen_buffer = g_malloc(code_gen_buffer_size); 55426a5f13bSbellard map_exec(code_gen_buffer, code_gen_buffer_size); 55526a5f13bSbellard #endif 5564369415fSbellard #endif /* !USE_STATIC_CODE_GEN_BUFFER */ 55726a5f13bSbellard map_exec(code_gen_prologue, sizeof(code_gen_prologue)); 55826a5f13bSbellard code_gen_buffer_max_size = code_gen_buffer_size - 559a884da8aSPeter Maydell (TCG_MAX_OP_SIZE * OPC_BUF_SIZE); 56026a5f13bSbellard code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE; 5617267c094SAnthony Liguori tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock)); 56226a5f13bSbellard } 56326a5f13bSbellard 56426a5f13bSbellard /* Must be called before using the QEMU cpus. 'tb_size' is the size 56526a5f13bSbellard (in bytes) allocated to the translation buffer. Zero means default 56626a5f13bSbellard size. 
*/ 567d5ab9713SJan Kiszka void tcg_exec_init(unsigned long tb_size) 56826a5f13bSbellard { 56926a5f13bSbellard cpu_gen_init(); 57026a5f13bSbellard code_gen_alloc(tb_size); 57126a5f13bSbellard code_gen_ptr = code_gen_buffer; 5724369415fSbellard page_init(); 5739002ec79SRichard Henderson #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) 5749002ec79SRichard Henderson /* There's no guest base to take into account, so go ahead and 5759002ec79SRichard Henderson initialize the prologue now. */ 5769002ec79SRichard Henderson tcg_prologue_init(&tcg_ctx); 5779002ec79SRichard Henderson #endif 57826a5f13bSbellard } 57926a5f13bSbellard 580d5ab9713SJan Kiszka bool tcg_enabled(void) 581d5ab9713SJan Kiszka { 582d5ab9713SJan Kiszka return code_gen_buffer != NULL; 583d5ab9713SJan Kiszka } 584d5ab9713SJan Kiszka 585d5ab9713SJan Kiszka void cpu_exec_init_all(void) 586d5ab9713SJan Kiszka { 587d5ab9713SJan Kiszka #if !defined(CONFIG_USER_ONLY) 588d5ab9713SJan Kiszka memory_map_init(); 589d5ab9713SJan Kiszka io_mem_init(); 590d5ab9713SJan Kiszka #endif 591d5ab9713SJan Kiszka } 592d5ab9713SJan Kiszka 5939656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 5949656f324Spbrook 595e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id) 596e7f4eff7SJuan Quintela { 597e7f4eff7SJuan Quintela CPUState *env = opaque; 598e7f4eff7SJuan Quintela 5993098dba0Saurel32 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the 6003098dba0Saurel32 version_id is increased. */ 6013098dba0Saurel32 env->interrupt_request &= ~0x01; 6029656f324Spbrook tlb_flush(env, 1); 6039656f324Spbrook 6049656f324Spbrook return 0; 6059656f324Spbrook } 606e7f4eff7SJuan Quintela 607e7f4eff7SJuan Quintela static const VMStateDescription vmstate_cpu_common = { 608e7f4eff7SJuan Quintela .name = "cpu_common", 609e7f4eff7SJuan Quintela .version_id = 1, 610e7f4eff7SJuan Quintela .minimum_version_id = 1, 611e7f4eff7SJuan Quintela .minimum_version_id_old = 1, 612e7f4eff7SJuan Quintela .post_load = cpu_common_post_load, 613e7f4eff7SJuan Quintela .fields = (VMStateField []) { 614e7f4eff7SJuan Quintela VMSTATE_UINT32(halted, CPUState), 615e7f4eff7SJuan Quintela VMSTATE_UINT32(interrupt_request, CPUState), 616e7f4eff7SJuan Quintela VMSTATE_END_OF_LIST() 617e7f4eff7SJuan Quintela } 618e7f4eff7SJuan Quintela }; 6199656f324Spbrook #endif 6209656f324Spbrook 621950f1472SGlauber Costa CPUState *qemu_get_cpu(int cpu) 622950f1472SGlauber Costa { 623950f1472SGlauber Costa CPUState *env = first_cpu; 624950f1472SGlauber Costa 625950f1472SGlauber Costa while (env) { 626950f1472SGlauber Costa if (env->cpu_index == cpu) 627950f1472SGlauber Costa break; 628950f1472SGlauber Costa env = env->next_cpu; 629950f1472SGlauber Costa } 630950f1472SGlauber Costa 631950f1472SGlauber Costa return env; 632950f1472SGlauber Costa } 633950f1472SGlauber Costa 6346a00d601Sbellard void cpu_exec_init(CPUState *env) 635fd6ce8f6Sbellard { 6366a00d601Sbellard CPUState **penv; 6376a00d601Sbellard int cpu_index; 6386a00d601Sbellard 639c2764719Spbrook #if defined(CONFIG_USER_ONLY) 640c2764719Spbrook cpu_list_lock(); 641c2764719Spbrook #endif 6426a00d601Sbellard env->next_cpu = NULL; 6436a00d601Sbellard penv = &first_cpu; 6446a00d601Sbellard cpu_index = 0; 6456a00d601Sbellard while (*penv != NULL) { 6461e9fa730SNathan Froyd penv = &(*penv)->next_cpu; 6476a00d601Sbellard cpu_index++; 6486a00d601Sbellard } 6496a00d601Sbellard env->cpu_index = cpu_index; 650268a362cSaliguori env->numa_node = 0; 65172cf2d4fSBlue Swirl 
QTAILQ_INIT(&env->breakpoints); 65272cf2d4fSBlue Swirl QTAILQ_INIT(&env->watchpoints); 653dc7a09cfSJan Kiszka #ifndef CONFIG_USER_ONLY 654dc7a09cfSJan Kiszka env->thread_id = qemu_get_thread_id(); 655dc7a09cfSJan Kiszka #endif 6566a00d601Sbellard *penv = env; 657c2764719Spbrook #if defined(CONFIG_USER_ONLY) 658c2764719Spbrook cpu_list_unlock(); 659c2764719Spbrook #endif 660b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 6610be71e32SAlex Williamson vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env); 6620be71e32SAlex Williamson register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION, 663b3c7724cSpbrook cpu_save, cpu_load, env); 664b3c7724cSpbrook #endif 665fd6ce8f6Sbellard } 666fd6ce8f6Sbellard 667d1a1eb74STristan Gingold /* Allocate a new translation block. Flush the translation buffer if 668d1a1eb74STristan Gingold too many translation blocks or too much generated code. */ 669d1a1eb74STristan Gingold static TranslationBlock *tb_alloc(target_ulong pc) 670d1a1eb74STristan Gingold { 671d1a1eb74STristan Gingold TranslationBlock *tb; 672d1a1eb74STristan Gingold 673d1a1eb74STristan Gingold if (nb_tbs >= code_gen_max_blocks || 674d1a1eb74STristan Gingold (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) 675d1a1eb74STristan Gingold return NULL; 676d1a1eb74STristan Gingold tb = &tbs[nb_tbs++]; 677d1a1eb74STristan Gingold tb->pc = pc; 678d1a1eb74STristan Gingold tb->cflags = 0; 679d1a1eb74STristan Gingold return tb; 680d1a1eb74STristan Gingold } 681d1a1eb74STristan Gingold 682d1a1eb74STristan Gingold void tb_free(TranslationBlock *tb) 683d1a1eb74STristan Gingold { 684d1a1eb74STristan Gingold /* In practice this is mostly used for single use temporary TB 685d1a1eb74STristan Gingold Ignore the hard cases and just back up if this TB happens to 686d1a1eb74STristan Gingold be the last one generated. */ 687d1a1eb74STristan Gingold if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) { 688d1a1eb74STristan Gingold code_gen_ptr = tb->tc_ptr; 689d1a1eb74STristan Gingold nb_tbs--; 690d1a1eb74STristan Gingold } 691d1a1eb74STristan Gingold } 692d1a1eb74STristan Gingold 6939fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p) 6949fa3e853Sbellard { 6959fa3e853Sbellard if (p->code_bitmap) { 6967267c094SAnthony Liguori g_free(p->code_bitmap); 6979fa3e853Sbellard p->code_bitmap = NULL; 6989fa3e853Sbellard } 6999fa3e853Sbellard p->code_write_count = 0; 7009fa3e853Sbellard } 7019fa3e853Sbellard 7025cd2c5b6SRichard Henderson /* Set to NULL all the 'first_tb' fields in all PageDescs. 
*/ 7035cd2c5b6SRichard Henderson 7045cd2c5b6SRichard Henderson static void page_flush_tb_1 (int level, void **lp) 7055cd2c5b6SRichard Henderson { 7065cd2c5b6SRichard Henderson int i; 7075cd2c5b6SRichard Henderson 7085cd2c5b6SRichard Henderson if (*lp == NULL) { 7095cd2c5b6SRichard Henderson return; 7105cd2c5b6SRichard Henderson } 7115cd2c5b6SRichard Henderson if (level == 0) { 7125cd2c5b6SRichard Henderson PageDesc *pd = *lp; 7137296abacSPaul Brook for (i = 0; i < L2_SIZE; ++i) { 7145cd2c5b6SRichard Henderson pd[i].first_tb = NULL; 7155cd2c5b6SRichard Henderson invalidate_page_bitmap(pd + i); 7165cd2c5b6SRichard Henderson } 7175cd2c5b6SRichard Henderson } else { 7185cd2c5b6SRichard Henderson void **pp = *lp; 7197296abacSPaul Brook for (i = 0; i < L2_SIZE; ++i) { 7205cd2c5b6SRichard Henderson page_flush_tb_1 (level - 1, pp + i); 7215cd2c5b6SRichard Henderson } 7225cd2c5b6SRichard Henderson } 7235cd2c5b6SRichard Henderson } 7245cd2c5b6SRichard Henderson 725fd6ce8f6Sbellard static void page_flush_tb(void) 726fd6ce8f6Sbellard { 7275cd2c5b6SRichard Henderson int i; 7285cd2c5b6SRichard Henderson for (i = 0; i < V_L1_SIZE; i++) { 7295cd2c5b6SRichard Henderson page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i); 730fd6ce8f6Sbellard } 731fd6ce8f6Sbellard } 732fd6ce8f6Sbellard 733fd6ce8f6Sbellard /* flush all the translation blocks */ 734d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */ 7356a00d601Sbellard void tb_flush(CPUState *env1) 736fd6ce8f6Sbellard { 7376a00d601Sbellard CPUState *env; 7380124311eSbellard #if defined(DEBUG_FLUSH) 739ab3d1727Sblueswir1 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", 740ab3d1727Sblueswir1 (unsigned long)(code_gen_ptr - code_gen_buffer), 741ab3d1727Sblueswir1 nb_tbs, nb_tbs > 0 ? 742ab3d1727Sblueswir1 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0); 743fd6ce8f6Sbellard #endif 74426a5f13bSbellard if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size) 745a208e54aSpbrook cpu_abort(env1, "Internal error: code buffer overflow\n"); 746a208e54aSpbrook 747fd6ce8f6Sbellard nb_tbs = 0; 7486a00d601Sbellard 7496a00d601Sbellard for(env = first_cpu; env != NULL; env = env->next_cpu) { 7508a40a180Sbellard memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 7516a00d601Sbellard } 7529fa3e853Sbellard 7538a8a608fSbellard memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *)); 754fd6ce8f6Sbellard page_flush_tb(); 7559fa3e853Sbellard 756fd6ce8f6Sbellard code_gen_ptr = code_gen_buffer; 757d4e8164fSbellard /* XXX: flush processor icache at this point if cache flush is 758d4e8164fSbellard expensive */ 759e3db7226Sbellard tb_flush_count++; 760fd6ce8f6Sbellard } 761fd6ce8f6Sbellard 762fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK 763fd6ce8f6Sbellard 764bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address) 765fd6ce8f6Sbellard { 766fd6ce8f6Sbellard TranslationBlock *tb; 767fd6ce8f6Sbellard int i; 768fd6ce8f6Sbellard address &= TARGET_PAGE_MASK; 76999773bd4Spbrook for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) { 77099773bd4Spbrook for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { 771fd6ce8f6Sbellard if (!(address + TARGET_PAGE_SIZE <= tb->pc || 772fd6ce8f6Sbellard address >= tb->pc + tb->size)) { 7730bf9e31aSBlue Swirl printf("ERROR invalidate: address=" TARGET_FMT_lx 7740bf9e31aSBlue Swirl " PC=%08lx size=%04x\n", 77599773bd4Spbrook address, (long)tb->pc, tb->size); 776fd6ce8f6Sbellard } 777fd6ce8f6Sbellard } 778fd6ce8f6Sbellard } 779fd6ce8f6Sbellard } 
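/* Illustrative sketch, not part of the original file: the consistency check
   above walks every bucket of tb_phys_hash[].  A single lookup in that table
   follows the same phys_hash_next chain, selecting the bucket with
   tb_phys_hash_func() exactly as tb_phys_invalidate() and tb_link_page() do
   further down.  The helper name and the page_addr[0]-based recovery of the
   physical PC are assumptions made for this sketch only. */
static TranslationBlock *tb_phys_hash_lookup_example(tb_page_addr_t phys_pc,
                                                     target_ulong pc)
{
    TranslationBlock *tb;
    unsigned int h = tb_phys_hash_func(phys_pc);

    for (tb = tb_phys_hash[h]; tb != NULL; tb = tb->phys_hash_next) {
        /* same physical-PC reconstruction as tb_phys_invalidate() below */
        if (tb->pc == pc &&
            tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK) == phys_pc) {
            return tb;
        }
    }
    return NULL;
}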
780fd6ce8f6Sbellard 781fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */ 782fd6ce8f6Sbellard static void tb_page_check(void) 783fd6ce8f6Sbellard { 784fd6ce8f6Sbellard TranslationBlock *tb; 785fd6ce8f6Sbellard int i, flags1, flags2; 786fd6ce8f6Sbellard 78799773bd4Spbrook for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) { 78899773bd4Spbrook for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { 789fd6ce8f6Sbellard flags1 = page_get_flags(tb->pc); 790fd6ce8f6Sbellard flags2 = page_get_flags(tb->pc + tb->size - 1); 791fd6ce8f6Sbellard if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { 792fd6ce8f6Sbellard printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", 79399773bd4Spbrook (long)tb->pc, tb->size, flags1, flags2); 794fd6ce8f6Sbellard } 795fd6ce8f6Sbellard } 796fd6ce8f6Sbellard } 797fd6ce8f6Sbellard } 798fd6ce8f6Sbellard 799fd6ce8f6Sbellard #endif 800fd6ce8f6Sbellard 801fd6ce8f6Sbellard /* invalidate one TB */ 802fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb, 803fd6ce8f6Sbellard int next_offset) 804fd6ce8f6Sbellard { 805fd6ce8f6Sbellard TranslationBlock *tb1; 806fd6ce8f6Sbellard for(;;) { 807fd6ce8f6Sbellard tb1 = *ptb; 808fd6ce8f6Sbellard if (tb1 == tb) { 809fd6ce8f6Sbellard *ptb = *(TranslationBlock **)((char *)tb1 + next_offset); 810fd6ce8f6Sbellard break; 811fd6ce8f6Sbellard } 812fd6ce8f6Sbellard ptb = (TranslationBlock **)((char *)tb1 + next_offset); 813fd6ce8f6Sbellard } 814fd6ce8f6Sbellard } 815fd6ce8f6Sbellard 8169fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) 8179fa3e853Sbellard { 8189fa3e853Sbellard TranslationBlock *tb1; 8199fa3e853Sbellard unsigned int n1; 8209fa3e853Sbellard 8219fa3e853Sbellard for(;;) { 8229fa3e853Sbellard tb1 = *ptb; 8239fa3e853Sbellard n1 = (long)tb1 & 3; 8249fa3e853Sbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 8259fa3e853Sbellard if (tb1 == tb) { 8269fa3e853Sbellard *ptb = tb1->page_next[n1]; 8279fa3e853Sbellard break; 8289fa3e853Sbellard } 8299fa3e853Sbellard ptb = &tb1->page_next[n1]; 8309fa3e853Sbellard } 8319fa3e853Sbellard } 8329fa3e853Sbellard 833d4e8164fSbellard static inline void tb_jmp_remove(TranslationBlock *tb, int n) 834d4e8164fSbellard { 835d4e8164fSbellard TranslationBlock *tb1, **ptb; 836d4e8164fSbellard unsigned int n1; 837d4e8164fSbellard 838d4e8164fSbellard ptb = &tb->jmp_next[n]; 839d4e8164fSbellard tb1 = *ptb; 840d4e8164fSbellard if (tb1) { 841d4e8164fSbellard /* find tb(n) in circular list */ 842d4e8164fSbellard for(;;) { 843d4e8164fSbellard tb1 = *ptb; 844d4e8164fSbellard n1 = (long)tb1 & 3; 845d4e8164fSbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 846d4e8164fSbellard if (n1 == n && tb1 == tb) 847d4e8164fSbellard break; 848d4e8164fSbellard if (n1 == 2) { 849d4e8164fSbellard ptb = &tb1->jmp_first; 850d4e8164fSbellard } else { 851d4e8164fSbellard ptb = &tb1->jmp_next[n1]; 852d4e8164fSbellard } 853d4e8164fSbellard } 854d4e8164fSbellard /* now we can suppress tb(n) from the list */ 855d4e8164fSbellard *ptb = tb->jmp_next[n]; 856d4e8164fSbellard 857d4e8164fSbellard tb->jmp_next[n] = NULL; 858d4e8164fSbellard } 859d4e8164fSbellard } 860d4e8164fSbellard 861d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to 862d4e8164fSbellard another TB */ 863d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n) 864d4e8164fSbellard { 865d4e8164fSbellard tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n])); 866d4e8164fSbellard } 
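/* Illustrative sketch, not in the original file: the jmp_first/jmp_next
   fields used above keep a tag in the two low bits of each pointer -- values
   0 and 1 name the jump slot of the referring TB, value 2 marks the list
   head stored in jmp_first.  These hypothetical helpers only give a name to
   the untagging idiom "(long)tb & 3" / "(long)tb & ~3" that tb_jmp_remove()
   above and tb_phys_invalidate() below use directly. */
static inline int tb_jmp_tag_example(TranslationBlock *tagged)
{
    return (long)tagged & 3;
}

static inline TranslationBlock *tb_jmp_untag_example(TranslationBlock *tagged)
{
    return (TranslationBlock *)((long)tagged & ~3);
}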
867d4e8164fSbellard 86841c1b1c9SPaul Brook void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) 869fd6ce8f6Sbellard { 8706a00d601Sbellard CPUState *env; 871fd6ce8f6Sbellard PageDesc *p; 8728a40a180Sbellard unsigned int h, n1; 87341c1b1c9SPaul Brook tb_page_addr_t phys_pc; 8748a40a180Sbellard TranslationBlock *tb1, *tb2; 875fd6ce8f6Sbellard 8769fa3e853Sbellard /* remove the TB from the hash list */ 8779fa3e853Sbellard phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 8789fa3e853Sbellard h = tb_phys_hash_func(phys_pc); 8799fa3e853Sbellard tb_remove(&tb_phys_hash[h], tb, 8809fa3e853Sbellard offsetof(TranslationBlock, phys_hash_next)); 8819fa3e853Sbellard 8829fa3e853Sbellard /* remove the TB from the page list */ 8839fa3e853Sbellard if (tb->page_addr[0] != page_addr) { 8849fa3e853Sbellard p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 8859fa3e853Sbellard tb_page_remove(&p->first_tb, tb); 8869fa3e853Sbellard invalidate_page_bitmap(p); 8879fa3e853Sbellard } 8889fa3e853Sbellard if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { 8899fa3e853Sbellard p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 8909fa3e853Sbellard tb_page_remove(&p->first_tb, tb); 8919fa3e853Sbellard invalidate_page_bitmap(p); 8929fa3e853Sbellard } 8939fa3e853Sbellard 8948a40a180Sbellard tb_invalidated_flag = 1; 8958a40a180Sbellard 8968a40a180Sbellard /* remove the TB from the hash list */ 8978a40a180Sbellard h = tb_jmp_cache_hash_func(tb->pc); 8986a00d601Sbellard for(env = first_cpu; env != NULL; env = env->next_cpu) { 8996a00d601Sbellard if (env->tb_jmp_cache[h] == tb) 9006a00d601Sbellard env->tb_jmp_cache[h] = NULL; 9016a00d601Sbellard } 9028a40a180Sbellard 9038a40a180Sbellard /* suppress this TB from the two jump lists */ 9048a40a180Sbellard tb_jmp_remove(tb, 0); 9058a40a180Sbellard tb_jmp_remove(tb, 1); 9068a40a180Sbellard 9078a40a180Sbellard /* suppress any remaining jumps to this TB */ 9088a40a180Sbellard tb1 = tb->jmp_first; 9098a40a180Sbellard for(;;) { 9108a40a180Sbellard n1 = (long)tb1 & 3; 9118a40a180Sbellard if (n1 == 2) 9128a40a180Sbellard break; 9138a40a180Sbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 9148a40a180Sbellard tb2 = tb1->jmp_next[n1]; 9158a40a180Sbellard tb_reset_jump(tb1, n1); 9168a40a180Sbellard tb1->jmp_next[n1] = NULL; 9178a40a180Sbellard tb1 = tb2; 9188a40a180Sbellard } 9198a40a180Sbellard tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */ 9208a40a180Sbellard 921e3db7226Sbellard tb_phys_invalidate_count++; 9229fa3e853Sbellard } 9239fa3e853Sbellard 9249fa3e853Sbellard static inline void set_bits(uint8_t *tab, int start, int len) 9259fa3e853Sbellard { 9269fa3e853Sbellard int end, mask, end1; 9279fa3e853Sbellard 9289fa3e853Sbellard end = start + len; 9299fa3e853Sbellard tab += start >> 3; 9309fa3e853Sbellard mask = 0xff << (start & 7); 9319fa3e853Sbellard if ((start & ~7) == (end & ~7)) { 9329fa3e853Sbellard if (start < end) { 9339fa3e853Sbellard mask &= ~(0xff << (end & 7)); 9349fa3e853Sbellard *tab |= mask; 9359fa3e853Sbellard } 9369fa3e853Sbellard } else { 9379fa3e853Sbellard *tab++ |= mask; 9389fa3e853Sbellard start = (start + 8) & ~7; 9399fa3e853Sbellard end1 = end & ~7; 9409fa3e853Sbellard while (start < end1) { 9419fa3e853Sbellard *tab++ = 0xff; 9429fa3e853Sbellard start += 8; 9439fa3e853Sbellard } 9449fa3e853Sbellard if (start < end) { 9459fa3e853Sbellard mask = ~(0xff << (end & 7)); 9469fa3e853Sbellard *tab |= mask; 9479fa3e853Sbellard } 9489fa3e853Sbellard } 9499fa3e853Sbellard } 9509fa3e853Sbellard 9519fa3e853Sbellard 
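/* Illustrative sketch, not in the original file: set_bits() above marks the
   bit range [start, start+len) in a plain uint8_t bitmap, one bit per guest
   byte of the page.  This hypothetical helper reads a single bit back using
   the same indexing ("offset >> 3", "offset & 7") that
   tb_invalidate_phys_page_fast() applies to p->code_bitmap further down. */
static inline int code_bitmap_test_example(const uint8_t *bitmap, int offset)
{
    return (bitmap[offset >> 3] >> (offset & 7)) & 1;
}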
static void build_page_bitmap(PageDesc *p) 9529fa3e853Sbellard { 9539fa3e853Sbellard int n, tb_start, tb_end; 9549fa3e853Sbellard TranslationBlock *tb; 9559fa3e853Sbellard 9567267c094SAnthony Liguori p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8); 9579fa3e853Sbellard 9589fa3e853Sbellard tb = p->first_tb; 9599fa3e853Sbellard while (tb != NULL) { 9609fa3e853Sbellard n = (long)tb & 3; 9619fa3e853Sbellard tb = (TranslationBlock *)((long)tb & ~3); 9629fa3e853Sbellard /* NOTE: this is subtle as a TB may span two physical pages */ 9639fa3e853Sbellard if (n == 0) { 9649fa3e853Sbellard /* NOTE: tb_end may be after the end of the page, but 9659fa3e853Sbellard it is not a problem */ 9669fa3e853Sbellard tb_start = tb->pc & ~TARGET_PAGE_MASK; 9679fa3e853Sbellard tb_end = tb_start + tb->size; 9689fa3e853Sbellard if (tb_end > TARGET_PAGE_SIZE) 9699fa3e853Sbellard tb_end = TARGET_PAGE_SIZE; 9709fa3e853Sbellard } else { 9719fa3e853Sbellard tb_start = 0; 9729fa3e853Sbellard tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 9739fa3e853Sbellard } 9749fa3e853Sbellard set_bits(p->code_bitmap, tb_start, tb_end - tb_start); 9759fa3e853Sbellard tb = tb->page_next[n]; 9769fa3e853Sbellard } 9779fa3e853Sbellard } 9789fa3e853Sbellard 9792e70f6efSpbrook TranslationBlock *tb_gen_code(CPUState *env, 9802e70f6efSpbrook target_ulong pc, target_ulong cs_base, 9812e70f6efSpbrook int flags, int cflags) 982d720b93dSbellard { 983d720b93dSbellard TranslationBlock *tb; 984d720b93dSbellard uint8_t *tc_ptr; 98541c1b1c9SPaul Brook tb_page_addr_t phys_pc, phys_page2; 98641c1b1c9SPaul Brook target_ulong virt_page2; 987d720b93dSbellard int code_gen_size; 988d720b93dSbellard 98941c1b1c9SPaul Brook phys_pc = get_page_addr_code(env, pc); 990c27004ecSbellard tb = tb_alloc(pc); 991d720b93dSbellard if (!tb) { 992d720b93dSbellard /* flush must be done */ 993d720b93dSbellard tb_flush(env); 994d720b93dSbellard /* cannot fail at this point */ 995c27004ecSbellard tb = tb_alloc(pc); 9962e70f6efSpbrook /* Don't forget to invalidate previous TB info. */ 9972e70f6efSpbrook tb_invalidated_flag = 1; 998d720b93dSbellard } 999d720b93dSbellard tc_ptr = code_gen_ptr; 1000d720b93dSbellard tb->tc_ptr = tc_ptr; 1001d720b93dSbellard tb->cs_base = cs_base; 1002d720b93dSbellard tb->flags = flags; 1003d720b93dSbellard tb->cflags = cflags; 1004d07bde88Sblueswir1 cpu_gen_code(env, tb, &code_gen_size); 1005d720b93dSbellard code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); 1006d720b93dSbellard 1007d720b93dSbellard /* check next page if needed */ 1008c27004ecSbellard virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; 1009d720b93dSbellard phys_page2 = -1; 1010c27004ecSbellard if ((pc & TARGET_PAGE_MASK) != virt_page2) { 101141c1b1c9SPaul Brook phys_page2 = get_page_addr_code(env, virt_page2); 1012d720b93dSbellard } 101341c1b1c9SPaul Brook tb_link_page(tb, phys_pc, phys_page2); 10142e70f6efSpbrook return tb; 1015d720b93dSbellard } 1016d720b93dSbellard 10179fa3e853Sbellard /* invalidate all TBs which intersect with the target physical page 10189fa3e853Sbellard starting in range [start;end[. NOTE: start and end must refer to 1019d720b93dSbellard the same physical page. 'is_cpu_write_access' should be true if called 1020d720b93dSbellard from a real cpu write access: the virtual CPU will exit the current 1021d720b93dSbellard TB if code is modified inside this TB. 
*/ 102241c1b1c9SPaul Brook void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, 1023d720b93dSbellard int is_cpu_write_access) 10249fa3e853Sbellard { 10256b917547Saliguori TranslationBlock *tb, *tb_next, *saved_tb; 1026d720b93dSbellard CPUState *env = cpu_single_env; 102741c1b1c9SPaul Brook tb_page_addr_t tb_start, tb_end; 10286b917547Saliguori PageDesc *p; 10296b917547Saliguori int n; 10306b917547Saliguori #ifdef TARGET_HAS_PRECISE_SMC 10316b917547Saliguori int current_tb_not_found = is_cpu_write_access; 10326b917547Saliguori TranslationBlock *current_tb = NULL; 10336b917547Saliguori int current_tb_modified = 0; 10346b917547Saliguori target_ulong current_pc = 0; 10356b917547Saliguori target_ulong current_cs_base = 0; 10366b917547Saliguori int current_flags = 0; 10376b917547Saliguori #endif /* TARGET_HAS_PRECISE_SMC */ 10389fa3e853Sbellard 10399fa3e853Sbellard p = page_find(start >> TARGET_PAGE_BITS); 10409fa3e853Sbellard if (!p) 10419fa3e853Sbellard return; 10429fa3e853Sbellard if (!p->code_bitmap && 1043d720b93dSbellard ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD && 1044d720b93dSbellard is_cpu_write_access) { 10459fa3e853Sbellard /* build code bitmap */ 10469fa3e853Sbellard build_page_bitmap(p); 10479fa3e853Sbellard } 10489fa3e853Sbellard 10499fa3e853Sbellard /* we remove all the TBs in the range [start, end[ */ 10509fa3e853Sbellard /* XXX: see if in some cases it could be faster to invalidate all the code */ 10519fa3e853Sbellard tb = p->first_tb; 10529fa3e853Sbellard while (tb != NULL) { 10539fa3e853Sbellard n = (long)tb & 3; 10549fa3e853Sbellard tb = (TranslationBlock *)((long)tb & ~3); 10559fa3e853Sbellard tb_next = tb->page_next[n]; 10569fa3e853Sbellard /* NOTE: this is subtle as a TB may span two physical pages */ 10579fa3e853Sbellard if (n == 0) { 10589fa3e853Sbellard /* NOTE: tb_end may be after the end of the page, but 10599fa3e853Sbellard it is not a problem */ 10609fa3e853Sbellard tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 10619fa3e853Sbellard tb_end = tb_start + tb->size; 10629fa3e853Sbellard } else { 10639fa3e853Sbellard tb_start = tb->page_addr[1]; 10649fa3e853Sbellard tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 10659fa3e853Sbellard } 10669fa3e853Sbellard if (!(tb_end <= start || tb_start >= end)) { 1067d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 1068d720b93dSbellard if (current_tb_not_found) { 1069d720b93dSbellard current_tb_not_found = 0; 1070d720b93dSbellard current_tb = NULL; 10712e70f6efSpbrook if (env->mem_io_pc) { 1072d720b93dSbellard /* now we have a real cpu fault */ 10732e70f6efSpbrook current_tb = tb_find_pc(env->mem_io_pc); 1074d720b93dSbellard } 1075d720b93dSbellard } 1076d720b93dSbellard if (current_tb == tb && 10772e70f6efSpbrook (current_tb->cflags & CF_COUNT_MASK) != 1) { 1078d720b93dSbellard /* If we are modifying the current TB, we must stop 1079d720b93dSbellard its execution. 
We could be more precise by checking 1080d720b93dSbellard that the modification is after the current PC, but it 1081d720b93dSbellard would require a specialized function to partially 1082d720b93dSbellard restore the CPU state */ 1083d720b93dSbellard 1084d720b93dSbellard current_tb_modified = 1; 1085618ba8e6SStefan Weil cpu_restore_state(current_tb, env, env->mem_io_pc); 10866b917547Saliguori cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, 10876b917547Saliguori ¤t_flags); 1088d720b93dSbellard } 1089d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */ 10906f5a9f7eSbellard /* we need to do that to handle the case where a signal 10916f5a9f7eSbellard occurs while doing tb_phys_invalidate() */ 10926f5a9f7eSbellard saved_tb = NULL; 10936f5a9f7eSbellard if (env) { 1094ea1c1802Sbellard saved_tb = env->current_tb; 1095ea1c1802Sbellard env->current_tb = NULL; 10966f5a9f7eSbellard } 10979fa3e853Sbellard tb_phys_invalidate(tb, -1); 10986f5a9f7eSbellard if (env) { 1099ea1c1802Sbellard env->current_tb = saved_tb; 1100ea1c1802Sbellard if (env->interrupt_request && env->current_tb) 1101ea1c1802Sbellard cpu_interrupt(env, env->interrupt_request); 11029fa3e853Sbellard } 11036f5a9f7eSbellard } 11049fa3e853Sbellard tb = tb_next; 11059fa3e853Sbellard } 11069fa3e853Sbellard #if !defined(CONFIG_USER_ONLY) 11079fa3e853Sbellard /* if no code remaining, no need to continue to use slow writes */ 11089fa3e853Sbellard if (!p->first_tb) { 11099fa3e853Sbellard invalidate_page_bitmap(p); 1110d720b93dSbellard if (is_cpu_write_access) { 11112e70f6efSpbrook tlb_unprotect_code_phys(env, start, env->mem_io_vaddr); 1112d720b93dSbellard } 1113d720b93dSbellard } 1114d720b93dSbellard #endif 1115d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 1116d720b93dSbellard if (current_tb_modified) { 1117d720b93dSbellard /* we generate a block containing just the instruction 1118d720b93dSbellard modifying the memory. 
It will ensure that it cannot modify 1119d720b93dSbellard itself */ 1120ea1c1802Sbellard env->current_tb = NULL; 11212e70f6efSpbrook tb_gen_code(env, current_pc, current_cs_base, current_flags, 1); 1122d720b93dSbellard cpu_resume_from_signal(env, NULL); 11239fa3e853Sbellard } 11249fa3e853Sbellard #endif 11259fa3e853Sbellard } 11269fa3e853Sbellard 11279fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */ 112841c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) 11299fa3e853Sbellard { 11309fa3e853Sbellard PageDesc *p; 11319fa3e853Sbellard int offset, b; 113259817ccbSbellard #if 0 1133a4193c8aSbellard if (1) { 113493fcfe39Saliguori qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 11352e70f6efSpbrook cpu_single_env->mem_io_vaddr, len, 1136a4193c8aSbellard cpu_single_env->eip, 1137a4193c8aSbellard cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base); 1138a4193c8aSbellard } 113959817ccbSbellard #endif 11409fa3e853Sbellard p = page_find(start >> TARGET_PAGE_BITS); 11419fa3e853Sbellard if (!p) 11429fa3e853Sbellard return; 11439fa3e853Sbellard if (p->code_bitmap) { 11449fa3e853Sbellard offset = start & ~TARGET_PAGE_MASK; 11459fa3e853Sbellard b = p->code_bitmap[offset >> 3] >> (offset & 7); 11469fa3e853Sbellard if (b & ((1 << len) - 1)) 11479fa3e853Sbellard goto do_invalidate; 11489fa3e853Sbellard } else { 11499fa3e853Sbellard do_invalidate: 1150d720b93dSbellard tb_invalidate_phys_page_range(start, start + len, 1); 11519fa3e853Sbellard } 11529fa3e853Sbellard } 11539fa3e853Sbellard 11549fa3e853Sbellard #if !defined(CONFIG_SOFTMMU) 115541c1b1c9SPaul Brook static void tb_invalidate_phys_page(tb_page_addr_t addr, 1156d720b93dSbellard unsigned long pc, void *puc) 11579fa3e853Sbellard { 11586b917547Saliguori TranslationBlock *tb; 11599fa3e853Sbellard PageDesc *p; 11606b917547Saliguori int n; 1161d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 11626b917547Saliguori TranslationBlock *current_tb = NULL; 1163d720b93dSbellard CPUState *env = cpu_single_env; 11646b917547Saliguori int current_tb_modified = 0; 11656b917547Saliguori target_ulong current_pc = 0; 11666b917547Saliguori target_ulong current_cs_base = 0; 11676b917547Saliguori int current_flags = 0; 1168d720b93dSbellard #endif 11699fa3e853Sbellard 11709fa3e853Sbellard addr &= TARGET_PAGE_MASK; 11719fa3e853Sbellard p = page_find(addr >> TARGET_PAGE_BITS); 1172fd6ce8f6Sbellard if (!p) 1173fd6ce8f6Sbellard return; 1174fd6ce8f6Sbellard tb = p->first_tb; 1175d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 1176d720b93dSbellard if (tb && pc != 0) { 1177d720b93dSbellard current_tb = tb_find_pc(pc); 1178d720b93dSbellard } 1179d720b93dSbellard #endif 1180fd6ce8f6Sbellard while (tb != NULL) { 11819fa3e853Sbellard n = (long)tb & 3; 11829fa3e853Sbellard tb = (TranslationBlock *)((long)tb & ~3); 1183d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 1184d720b93dSbellard if (current_tb == tb && 11852e70f6efSpbrook (current_tb->cflags & CF_COUNT_MASK) != 1) { 1186d720b93dSbellard /* If we are modifying the current TB, we must stop 1187d720b93dSbellard its execution. 
We could be more precise by checking 1188d720b93dSbellard that the modification is after the current PC, but it 1189d720b93dSbellard would require a specialized function to partially 1190d720b93dSbellard restore the CPU state */ 1191d720b93dSbellard 1192d720b93dSbellard current_tb_modified = 1; 1193618ba8e6SStefan Weil cpu_restore_state(current_tb, env, pc); 11946b917547Saliguori cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, 11956b917547Saliguori ¤t_flags); 1196d720b93dSbellard } 1197d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */ 11989fa3e853Sbellard tb_phys_invalidate(tb, addr); 11999fa3e853Sbellard tb = tb->page_next[n]; 1200fd6ce8f6Sbellard } 1201fd6ce8f6Sbellard p->first_tb = NULL; 1202d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC 1203d720b93dSbellard if (current_tb_modified) { 1204d720b93dSbellard /* we generate a block containing just the instruction 1205d720b93dSbellard modifying the memory. It will ensure that it cannot modify 1206d720b93dSbellard itself */ 1207ea1c1802Sbellard env->current_tb = NULL; 12082e70f6efSpbrook tb_gen_code(env, current_pc, current_cs_base, current_flags, 1); 1209d720b93dSbellard cpu_resume_from_signal(env, puc); 1210d720b93dSbellard } 1211d720b93dSbellard #endif 1212fd6ce8f6Sbellard } 12139fa3e853Sbellard #endif 1214fd6ce8f6Sbellard 1215fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */ 12169fa3e853Sbellard static inline void tb_alloc_page(TranslationBlock *tb, 121741c1b1c9SPaul Brook unsigned int n, tb_page_addr_t page_addr) 1218fd6ce8f6Sbellard { 1219fd6ce8f6Sbellard PageDesc *p; 12204429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY 12214429ab44SJuan Quintela bool page_already_protected; 12224429ab44SJuan Quintela #endif 12239fa3e853Sbellard 12249fa3e853Sbellard tb->page_addr[n] = page_addr; 12255cd2c5b6SRichard Henderson p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); 12269fa3e853Sbellard tb->page_next[n] = p->first_tb; 12274429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY 12284429ab44SJuan Quintela page_already_protected = p->first_tb != NULL; 12294429ab44SJuan Quintela #endif 12309fa3e853Sbellard p->first_tb = (TranslationBlock *)((long)tb | n); 12319fa3e853Sbellard invalidate_page_bitmap(p); 12329fa3e853Sbellard 1233107db443Sbellard #if defined(TARGET_HAS_SMC) || 1 1234d720b93dSbellard 12359fa3e853Sbellard #if defined(CONFIG_USER_ONLY) 12369fa3e853Sbellard if (p->flags & PAGE_WRITE) { 123753a5960aSpbrook target_ulong addr; 123853a5960aSpbrook PageDesc *p2; 1239fd6ce8f6Sbellard int prot; 1240fd6ce8f6Sbellard 1241fd6ce8f6Sbellard /* force the host page as non writable (writes will have a 1242fd6ce8f6Sbellard page fault + mprotect overhead) */ 124353a5960aSpbrook page_addr &= qemu_host_page_mask; 1244fd6ce8f6Sbellard prot = 0; 124553a5960aSpbrook for(addr = page_addr; addr < page_addr + qemu_host_page_size; 124653a5960aSpbrook addr += TARGET_PAGE_SIZE) { 124753a5960aSpbrook 124853a5960aSpbrook p2 = page_find (addr >> TARGET_PAGE_BITS); 124953a5960aSpbrook if (!p2) 125053a5960aSpbrook continue; 125153a5960aSpbrook prot |= p2->flags; 125253a5960aSpbrook p2->flags &= ~PAGE_WRITE; 125353a5960aSpbrook } 125453a5960aSpbrook mprotect(g2h(page_addr), qemu_host_page_size, 1255fd6ce8f6Sbellard (prot & PAGE_BITS) & ~PAGE_WRITE); 1256fd6ce8f6Sbellard #ifdef DEBUG_TB_INVALIDATE 1257ab3d1727Sblueswir1 printf("protecting code page: 0x" TARGET_FMT_lx "\n", 125853a5960aSpbrook page_addr); 1259fd6ce8f6Sbellard #endif 1260fd6ce8f6Sbellard } 12619fa3e853Sbellard #else 12629fa3e853Sbellard /* if some code is already present, then the 
pages are already 12639fa3e853Sbellard protected. So we handle the case where only the first TB is 12649fa3e853Sbellard allocated in a physical page */ 12654429ab44SJuan Quintela if (!page_already_protected) { 12666a00d601Sbellard tlb_protect_code(page_addr); 12679fa3e853Sbellard } 12689fa3e853Sbellard #endif 1269d720b93dSbellard 1270d720b93dSbellard #endif /* TARGET_HAS_SMC */ 1271fd6ce8f6Sbellard } 1272fd6ce8f6Sbellard 12739fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is 12749fa3e853Sbellard (-1) to indicate that only one page contains the TB. */ 127541c1b1c9SPaul Brook void tb_link_page(TranslationBlock *tb, 127641c1b1c9SPaul Brook tb_page_addr_t phys_pc, tb_page_addr_t phys_page2) 1277d4e8164fSbellard { 12789fa3e853Sbellard unsigned int h; 12799fa3e853Sbellard TranslationBlock **ptb; 12809fa3e853Sbellard 1281c8a706feSpbrook /* Grab the mmap lock to stop another thread invalidating this TB 1282c8a706feSpbrook before we are done. */ 1283c8a706feSpbrook mmap_lock(); 12849fa3e853Sbellard /* add in the physical hash table */ 12859fa3e853Sbellard h = tb_phys_hash_func(phys_pc); 12869fa3e853Sbellard ptb = &tb_phys_hash[h]; 12879fa3e853Sbellard tb->phys_hash_next = *ptb; 12889fa3e853Sbellard *ptb = tb; 1289fd6ce8f6Sbellard 1290fd6ce8f6Sbellard /* add in the page list */ 12919fa3e853Sbellard tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK); 12929fa3e853Sbellard if (phys_page2 != -1) 12939fa3e853Sbellard tb_alloc_page(tb, 1, phys_page2); 12949fa3e853Sbellard else 12959fa3e853Sbellard tb->page_addr[1] = -1; 12969fa3e853Sbellard 1297d4e8164fSbellard tb->jmp_first = (TranslationBlock *)((long)tb | 2); 1298d4e8164fSbellard tb->jmp_next[0] = NULL; 1299d4e8164fSbellard tb->jmp_next[1] = NULL; 1300d4e8164fSbellard 1301d4e8164fSbellard /* init original jump addresses */ 1302d4e8164fSbellard if (tb->tb_next_offset[0] != 0xffff) 1303d4e8164fSbellard tb_reset_jump(tb, 0); 1304d4e8164fSbellard if (tb->tb_next_offset[1] != 0xffff) 1305d4e8164fSbellard tb_reset_jump(tb, 1); 13068a40a180Sbellard 13078a40a180Sbellard #ifdef DEBUG_TB_CHECK 13088a40a180Sbellard tb_page_check(); 13098a40a180Sbellard #endif 1310c8a706feSpbrook mmap_unlock(); 1311fd6ce8f6Sbellard } 1312fd6ce8f6Sbellard 1313a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < 1314a513fe19Sbellard tb[1].tc_ptr. 
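/* Editor's aside (illustrative sketch, not part of the original file): how
 * the physical hash chain built by tb_link_page() above can be searched for
 * a given physical PC.  The helper name is hypothetical and the match is
 * simplified; a real lookup would also compare the guest pc, cs_base and
 * flags of each candidate TB. */
static TranslationBlock *example_tb_phys_lookup(tb_page_addr_t phys_pc)
{
    unsigned int h = tb_phys_hash_func(phys_pc);
    TranslationBlock *tb;

    /* walk the collision chain headed at tb_phys_hash[h] */
    for (tb = tb_phys_hash[h]; tb != NULL; tb = tb->phys_hash_next) {
        if (tb->page_addr[0] == (phys_pc & TARGET_PAGE_MASK)) {
            return tb;
        }
    }
    return NULL;
}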
Return NULL if not found */ 1315a513fe19Sbellard TranslationBlock *tb_find_pc(unsigned long tc_ptr) 1316a513fe19Sbellard { 1317a513fe19Sbellard int m_min, m_max, m; 1318a513fe19Sbellard unsigned long v; 1319a513fe19Sbellard TranslationBlock *tb; 1320a513fe19Sbellard 1321a513fe19Sbellard if (nb_tbs <= 0) 1322a513fe19Sbellard return NULL; 1323a513fe19Sbellard if (tc_ptr < (unsigned long)code_gen_buffer || 1324a513fe19Sbellard tc_ptr >= (unsigned long)code_gen_ptr) 1325a513fe19Sbellard return NULL; 1326a513fe19Sbellard /* binary search (cf Knuth) */ 1327a513fe19Sbellard m_min = 0; 1328a513fe19Sbellard m_max = nb_tbs - 1; 1329a513fe19Sbellard while (m_min <= m_max) { 1330a513fe19Sbellard m = (m_min + m_max) >> 1; 1331a513fe19Sbellard tb = &tbs[m]; 1332a513fe19Sbellard v = (unsigned long)tb->tc_ptr; 1333a513fe19Sbellard if (v == tc_ptr) 1334a513fe19Sbellard return tb; 1335a513fe19Sbellard else if (tc_ptr < v) { 1336a513fe19Sbellard m_max = m - 1; 1337a513fe19Sbellard } else { 1338a513fe19Sbellard m_min = m + 1; 1339a513fe19Sbellard } 1340a513fe19Sbellard } 1341a513fe19Sbellard return &tbs[m_max]; 1342a513fe19Sbellard } 13437501267eSbellard 1344ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb); 1345ea041c0eSbellard 1346ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n) 1347ea041c0eSbellard { 1348ea041c0eSbellard TranslationBlock *tb1, *tb_next, **ptb; 1349ea041c0eSbellard unsigned int n1; 1350ea041c0eSbellard 1351ea041c0eSbellard tb1 = tb->jmp_next[n]; 1352ea041c0eSbellard if (tb1 != NULL) { 1353ea041c0eSbellard /* find head of list */ 1354ea041c0eSbellard for(;;) { 1355ea041c0eSbellard n1 = (long)tb1 & 3; 1356ea041c0eSbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 1357ea041c0eSbellard if (n1 == 2) 1358ea041c0eSbellard break; 1359ea041c0eSbellard tb1 = tb1->jmp_next[n1]; 1360ea041c0eSbellard } 1361ea041c0eSbellard /* we are now sure now that tb jumps to tb1 */ 1362ea041c0eSbellard tb_next = tb1; 1363ea041c0eSbellard 1364ea041c0eSbellard /* remove tb from the jmp_first list */ 1365ea041c0eSbellard ptb = &tb_next->jmp_first; 1366ea041c0eSbellard for(;;) { 1367ea041c0eSbellard tb1 = *ptb; 1368ea041c0eSbellard n1 = (long)tb1 & 3; 1369ea041c0eSbellard tb1 = (TranslationBlock *)((long)tb1 & ~3); 1370ea041c0eSbellard if (n1 == n && tb1 == tb) 1371ea041c0eSbellard break; 1372ea041c0eSbellard ptb = &tb1->jmp_next[n1]; 1373ea041c0eSbellard } 1374ea041c0eSbellard *ptb = tb->jmp_next[n]; 1375ea041c0eSbellard tb->jmp_next[n] = NULL; 1376ea041c0eSbellard 1377ea041c0eSbellard /* suppress the jump to next tb in generated code */ 1378ea041c0eSbellard tb_reset_jump(tb, n); 1379ea041c0eSbellard 13800124311eSbellard /* suppress jumps in the tb on which we could have jumped */ 1381ea041c0eSbellard tb_reset_jump_recursive(tb_next); 1382ea041c0eSbellard } 1383ea041c0eSbellard } 1384ea041c0eSbellard 1385ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb) 1386ea041c0eSbellard { 1387ea041c0eSbellard tb_reset_jump_recursive2(tb, 0); 1388ea041c0eSbellard tb_reset_jump_recursive2(tb, 1); 1389ea041c0eSbellard } 1390ea041c0eSbellard 13911fddef4bSbellard #if defined(TARGET_HAS_ICE) 139294df27fdSPaul Brook #if defined(CONFIG_USER_ONLY) 139394df27fdSPaul Brook static void breakpoint_invalidate(CPUState *env, target_ulong pc) 139494df27fdSPaul Brook { 139594df27fdSPaul Brook tb_invalidate_phys_page_range(pc, pc + 1, 0); 139694df27fdSPaul Brook } 139794df27fdSPaul Brook #else 1398d720b93dSbellard static void breakpoint_invalidate(CPUState 
*env, target_ulong pc) 1399d720b93dSbellard { 1400c227f099SAnthony Liguori target_phys_addr_t addr; 14019b3c35e0Sj_mayer target_ulong pd; 1402c227f099SAnthony Liguori ram_addr_t ram_addr; 1403c2f07f81Spbrook PhysPageDesc *p; 1404d720b93dSbellard 1405c2f07f81Spbrook addr = cpu_get_phys_page_debug(env, pc); 1406c2f07f81Spbrook p = phys_page_find(addr >> TARGET_PAGE_BITS); 1407c2f07f81Spbrook if (!p) { 1408c2f07f81Spbrook pd = IO_MEM_UNASSIGNED; 1409c2f07f81Spbrook } else { 1410c2f07f81Spbrook pd = p->phys_offset; 1411c2f07f81Spbrook } 1412c2f07f81Spbrook ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK); 1413706cd4b5Spbrook tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); 1414d720b93dSbellard } 1415c27004ecSbellard #endif 141694df27fdSPaul Brook #endif /* TARGET_HAS_ICE */ 1417d720b93dSbellard 1418c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY) 1419c527ee8fSPaul Brook void cpu_watchpoint_remove_all(CPUState *env, int mask) 1420c527ee8fSPaul Brook 1421c527ee8fSPaul Brook { 1422c527ee8fSPaul Brook } 1423c527ee8fSPaul Brook 1424c527ee8fSPaul Brook int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, 1425c527ee8fSPaul Brook int flags, CPUWatchpoint **watchpoint) 1426c527ee8fSPaul Brook { 1427c527ee8fSPaul Brook return -ENOSYS; 1428c527ee8fSPaul Brook } 1429c527ee8fSPaul Brook #else 14306658ffb8Spbrook /* Add a watchpoint. */ 1431a1d1bb31Saliguori int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, 1432a1d1bb31Saliguori int flags, CPUWatchpoint **watchpoint) 14336658ffb8Spbrook { 1434b4051334Saliguori target_ulong len_mask = ~(len - 1); 1435c0ce998eSaliguori CPUWatchpoint *wp; 14366658ffb8Spbrook 1437b4051334Saliguori /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */ 1438b4051334Saliguori if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) { 1439b4051334Saliguori fprintf(stderr, "qemu: tried to set invalid watchpoint at " 1440b4051334Saliguori TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len); 1441b4051334Saliguori return -EINVAL; 1442b4051334Saliguori } 14437267c094SAnthony Liguori wp = g_malloc(sizeof(*wp)); 14446658ffb8Spbrook 1445a1d1bb31Saliguori wp->vaddr = addr; 1446b4051334Saliguori wp->len_mask = len_mask; 1447a1d1bb31Saliguori wp->flags = flags; 1448a1d1bb31Saliguori 14492dc9f411Saliguori /* keep all GDB-injected watchpoints in front */ 1450c0ce998eSaliguori if (flags & BP_GDB) 145172cf2d4fSBlue Swirl QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry); 1452c0ce998eSaliguori else 145372cf2d4fSBlue Swirl QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry); 1454a1d1bb31Saliguori 14556658ffb8Spbrook tlb_flush_page(env, addr); 1456a1d1bb31Saliguori 1457a1d1bb31Saliguori if (watchpoint) 1458a1d1bb31Saliguori *watchpoint = wp; 1459a1d1bb31Saliguori return 0; 14606658ffb8Spbrook } 14616658ffb8Spbrook 1462a1d1bb31Saliguori /* Remove a specific watchpoint. 
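/* Editor's aside (illustrative, not in the original source): the sanity
 * check in cpu_watchpoint_insert() above accepts only power-of-two lengths
 * at naturally aligned addresses, since len_mask = ~(len - 1) and
 * (addr & ~len_mask) must be zero.  The address values and helper name are
 * made up; real callers also OR in access-type bits such as BP_MEM_READ. */
static void example_watchpoint_usage(CPUState *env)
{
    CPUWatchpoint *wp;

    /* 4 bytes at 0x1000 is aligned and passes; 4 bytes at 0x1002, or a
       length of 3, would return -EINVAL */
    if (cpu_watchpoint_insert(env, 0x1000, 4, BP_GDB, &wp) == 0) {
        /* ... guest runs, the watchpoint may fire ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}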
*/ 1463a1d1bb31Saliguori int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len, 1464a1d1bb31Saliguori int flags) 14656658ffb8Spbrook { 1466b4051334Saliguori target_ulong len_mask = ~(len - 1); 1467a1d1bb31Saliguori CPUWatchpoint *wp; 14686658ffb8Spbrook 146972cf2d4fSBlue Swirl QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 1470b4051334Saliguori if (addr == wp->vaddr && len_mask == wp->len_mask 14716e140f28Saliguori && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { 1472a1d1bb31Saliguori cpu_watchpoint_remove_by_ref(env, wp); 14736658ffb8Spbrook return 0; 14746658ffb8Spbrook } 14756658ffb8Spbrook } 1476a1d1bb31Saliguori return -ENOENT; 14776658ffb8Spbrook } 14786658ffb8Spbrook 1479a1d1bb31Saliguori /* Remove a specific watchpoint by reference. */ 1480a1d1bb31Saliguori void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) 1481a1d1bb31Saliguori { 148272cf2d4fSBlue Swirl QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry); 14837d03f82fSedgar_igl 1484a1d1bb31Saliguori tlb_flush_page(env, watchpoint->vaddr); 1485a1d1bb31Saliguori 14867267c094SAnthony Liguori g_free(watchpoint); 14877d03f82fSedgar_igl } 14887d03f82fSedgar_igl 1489a1d1bb31Saliguori /* Remove all matching watchpoints. */ 1490a1d1bb31Saliguori void cpu_watchpoint_remove_all(CPUState *env, int mask) 1491a1d1bb31Saliguori { 1492c0ce998eSaliguori CPUWatchpoint *wp, *next; 1493a1d1bb31Saliguori 149472cf2d4fSBlue Swirl QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) { 1495a1d1bb31Saliguori if (wp->flags & mask) 1496a1d1bb31Saliguori cpu_watchpoint_remove_by_ref(env, wp); 1497a1d1bb31Saliguori } 1498c0ce998eSaliguori } 1499c527ee8fSPaul Brook #endif 1500a1d1bb31Saliguori 1501a1d1bb31Saliguori /* Add a breakpoint. */ 1502a1d1bb31Saliguori int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, 1503a1d1bb31Saliguori CPUBreakpoint **breakpoint) 15044c3a88a2Sbellard { 15051fddef4bSbellard #if defined(TARGET_HAS_ICE) 1506c0ce998eSaliguori CPUBreakpoint *bp; 15074c3a88a2Sbellard 15087267c094SAnthony Liguori bp = g_malloc(sizeof(*bp)); 15094c3a88a2Sbellard 1510a1d1bb31Saliguori bp->pc = pc; 1511a1d1bb31Saliguori bp->flags = flags; 1512a1d1bb31Saliguori 15132dc9f411Saliguori /* keep all GDB-injected breakpoints in front */ 1514c0ce998eSaliguori if (flags & BP_GDB) 151572cf2d4fSBlue Swirl QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry); 1516c0ce998eSaliguori else 151772cf2d4fSBlue Swirl QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry); 1518d720b93dSbellard 1519d720b93dSbellard breakpoint_invalidate(env, pc); 1520a1d1bb31Saliguori 1521a1d1bb31Saliguori if (breakpoint) 1522a1d1bb31Saliguori *breakpoint = bp; 15234c3a88a2Sbellard return 0; 15244c3a88a2Sbellard #else 1525a1d1bb31Saliguori return -ENOSYS; 15264c3a88a2Sbellard #endif 15274c3a88a2Sbellard } 15284c3a88a2Sbellard 1529a1d1bb31Saliguori /* Remove a specific breakpoint. 
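/* Editor's aside (illustrative sketch, not part of the original file):
 * typical pairing of the breakpoint calls above and below, as a debugger
 * front end such as the gdb stub would use them.  The helper name is
 * hypothetical; pc is caller-supplied. */
static void example_breakpoint_usage(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* breakpoint_invalidate() has flushed any TB covering pc, so the
           code is retranslated with the breakpoint visible */
        /* ... resume the guest ... */
        cpu_breakpoint_remove(env, pc, BP_GDB);
    }
}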
*/ 1530a1d1bb31Saliguori int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) 1531a1d1bb31Saliguori { 15327d03f82fSedgar_igl #if defined(TARGET_HAS_ICE) 1533a1d1bb31Saliguori CPUBreakpoint *bp; 1534a1d1bb31Saliguori 153572cf2d4fSBlue Swirl QTAILQ_FOREACH(bp, &env->breakpoints, entry) { 1536a1d1bb31Saliguori if (bp->pc == pc && bp->flags == flags) { 1537a1d1bb31Saliguori cpu_breakpoint_remove_by_ref(env, bp); 1538a1d1bb31Saliguori return 0; 15397d03f82fSedgar_igl } 1540a1d1bb31Saliguori } 1541a1d1bb31Saliguori return -ENOENT; 1542a1d1bb31Saliguori #else 1543a1d1bb31Saliguori return -ENOSYS; 15447d03f82fSedgar_igl #endif 15457d03f82fSedgar_igl } 15467d03f82fSedgar_igl 1547a1d1bb31Saliguori /* Remove a specific breakpoint by reference. */ 1548a1d1bb31Saliguori void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) 15494c3a88a2Sbellard { 15501fddef4bSbellard #if defined(TARGET_HAS_ICE) 155172cf2d4fSBlue Swirl QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry); 1552d720b93dSbellard 1553a1d1bb31Saliguori breakpoint_invalidate(env, breakpoint->pc); 1554a1d1bb31Saliguori 15557267c094SAnthony Liguori g_free(breakpoint); 1556a1d1bb31Saliguori #endif 1557a1d1bb31Saliguori } 1558a1d1bb31Saliguori 1559a1d1bb31Saliguori /* Remove all matching breakpoints. */ 1560a1d1bb31Saliguori void cpu_breakpoint_remove_all(CPUState *env, int mask) 1561a1d1bb31Saliguori { 1562a1d1bb31Saliguori #if defined(TARGET_HAS_ICE) 1563c0ce998eSaliguori CPUBreakpoint *bp, *next; 1564a1d1bb31Saliguori 156572cf2d4fSBlue Swirl QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) { 1566a1d1bb31Saliguori if (bp->flags & mask) 1567a1d1bb31Saliguori cpu_breakpoint_remove_by_ref(env, bp); 1568c0ce998eSaliguori } 15694c3a88a2Sbellard #endif 15704c3a88a2Sbellard } 15714c3a88a2Sbellard 1572c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the 1573c33a346eSbellard CPU loop after each instruction */ 1574c33a346eSbellard void cpu_single_step(CPUState *env, int enabled) 1575c33a346eSbellard { 15761fddef4bSbellard #if defined(TARGET_HAS_ICE) 1577c33a346eSbellard if (env->singlestep_enabled != enabled) { 1578c33a346eSbellard env->singlestep_enabled = enabled; 1579e22a25c9Saliguori if (kvm_enabled()) 1580e22a25c9Saliguori kvm_update_guest_debug(env, 0); 1581e22a25c9Saliguori else { 1582ccbb4d44SStuart Brady /* must flush all the translated code to avoid inconsistencies */ 15839fa3e853Sbellard /* XXX: only flush what is necessary */ 15840124311eSbellard tb_flush(env); 1585c33a346eSbellard } 1586e22a25c9Saliguori } 1587c33a346eSbellard #endif 1588c33a346eSbellard } 1589c33a346eSbellard 159034865134Sbellard /* enable or disable low levels log */ 159134865134Sbellard void cpu_set_log(int log_flags) 159234865134Sbellard { 159334865134Sbellard loglevel = log_flags; 159434865134Sbellard if (loglevel && !logfile) { 159511fcfab4Spbrook logfile = fopen(logfilename, log_append ? "a" : "w"); 159634865134Sbellard if (!logfile) { 159734865134Sbellard perror(logfilename); 159834865134Sbellard _exit(1); 159934865134Sbellard } 16009fa3e853Sbellard #if !defined(CONFIG_SOFTMMU) 16019fa3e853Sbellard /* must avoid mmap() usage of glibc by setting a buffer "by hand" */ 16029fa3e853Sbellard { 1603b55266b5Sblueswir1 static char logfile_buf[4096]; 16049fa3e853Sbellard setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf)); 16059fa3e853Sbellard } 1606daf767b1SStefan Weil #elif defined(_WIN32) 1607daf767b1SStefan Weil /* Win32 doesn't support line-buffering, so use unbuffered output. 
*/ 1608daf767b1SStefan Weil setvbuf(logfile, NULL, _IONBF, 0); 1609daf767b1SStefan Weil #else 161034865134Sbellard setvbuf(logfile, NULL, _IOLBF, 0); 16119fa3e853Sbellard #endif 1612e735b91cSpbrook log_append = 1; 1613e735b91cSpbrook } 1614e735b91cSpbrook if (!loglevel && logfile) { 1615e735b91cSpbrook fclose(logfile); 1616e735b91cSpbrook logfile = NULL; 161734865134Sbellard } 161834865134Sbellard } 161934865134Sbellard 162034865134Sbellard void cpu_set_log_filename(const char *filename) 162134865134Sbellard { 162234865134Sbellard logfilename = strdup(filename); 1623e735b91cSpbrook if (logfile) { 1624e735b91cSpbrook fclose(logfile); 1625e735b91cSpbrook logfile = NULL; 1626e735b91cSpbrook } 1627e735b91cSpbrook cpu_set_log(loglevel); 162834865134Sbellard } 1629c33a346eSbellard 16303098dba0Saurel32 static void cpu_unlink_tb(CPUState *env) 1631ea041c0eSbellard { 1632d5975363Spbrook /* FIXME: TB unchaining isn't SMP safe. For now just ignore the 1633d5975363Spbrook problem and hope the cpu will stop of its own accord. For userspace 1634d5975363Spbrook emulation this often isn't actually as bad as it sounds. Often 1635d5975363Spbrook signals are used primarily to interrupt blocking syscalls. */ 16363098dba0Saurel32 TranslationBlock *tb; 1637c227f099SAnthony Liguori static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED; 16383098dba0Saurel32 1639cab1b4bdSRiku Voipio spin_lock(&interrupt_lock); 16403098dba0Saurel32 tb = env->current_tb; 16413098dba0Saurel32 /* if the cpu is currently executing code, we must unlink it and 16423098dba0Saurel32 all the potentially executing TB */ 1643f76cfe56SRiku Voipio if (tb) { 16443098dba0Saurel32 env->current_tb = NULL; 16453098dba0Saurel32 tb_reset_jump_recursive(tb); 16463098dba0Saurel32 } 1647cab1b4bdSRiku Voipio spin_unlock(&interrupt_lock); 16483098dba0Saurel32 } 16493098dba0Saurel32 165097ffbd8dSJan Kiszka #ifndef CONFIG_USER_ONLY 16513098dba0Saurel32 /* mask must never be zero, except for A20 change call */ 1652ec6959d0SJan Kiszka static void tcg_handle_interrupt(CPUState *env, int mask) 16533098dba0Saurel32 { 16543098dba0Saurel32 int old_mask; 16553098dba0Saurel32 16563098dba0Saurel32 old_mask = env->interrupt_request; 16573098dba0Saurel32 env->interrupt_request |= mask; 16583098dba0Saurel32 16598edac960Saliguori /* 16608edac960Saliguori * If called from iothread context, wake the target cpu in 16618edac960Saliguori * case its halted. 
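/* Editor's aside (illustrative, not in the original file): the usual call
 * sequence for the logging helpers above.  "in_asm" and "exec" are names
 * from the cpu_log_items table below, cpu_str_to_log_mask() is defined
 * further down, and the log file path is a placeholder. */
static void example_enable_logging(void)
{
    int mask = cpu_str_to_log_mask("in_asm,exec");

    if (mask != 0) {
        cpu_set_log_filename("/tmp/qemu.log");
        cpu_set_log(mask);
    }
}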
16628edac960Saliguori */ 1663b7680cb6SJan Kiszka if (!qemu_cpu_is_self(env)) { 16648edac960Saliguori qemu_cpu_kick(env); 16658edac960Saliguori return; 16668edac960Saliguori } 16678edac960Saliguori 16682e70f6efSpbrook if (use_icount) { 1669266910c4Spbrook env->icount_decr.u16.high = 0xffff; 16702e70f6efSpbrook if (!can_do_io(env) 1671be214e6cSaurel32 && (mask & ~old_mask) != 0) { 16722e70f6efSpbrook cpu_abort(env, "Raised interrupt while not in I/O function"); 16732e70f6efSpbrook } 16742e70f6efSpbrook } else { 16753098dba0Saurel32 cpu_unlink_tb(env); 1676ea041c0eSbellard } 16772e70f6efSpbrook } 1678ea041c0eSbellard 1679ec6959d0SJan Kiszka CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt; 1680ec6959d0SJan Kiszka 168197ffbd8dSJan Kiszka #else /* CONFIG_USER_ONLY */ 168297ffbd8dSJan Kiszka 168397ffbd8dSJan Kiszka void cpu_interrupt(CPUState *env, int mask) 168497ffbd8dSJan Kiszka { 168597ffbd8dSJan Kiszka env->interrupt_request |= mask; 168697ffbd8dSJan Kiszka cpu_unlink_tb(env); 168797ffbd8dSJan Kiszka } 168897ffbd8dSJan Kiszka #endif /* CONFIG_USER_ONLY */ 168997ffbd8dSJan Kiszka 1690b54ad049Sbellard void cpu_reset_interrupt(CPUState *env, int mask) 1691b54ad049Sbellard { 1692b54ad049Sbellard env->interrupt_request &= ~mask; 1693b54ad049Sbellard } 1694b54ad049Sbellard 16953098dba0Saurel32 void cpu_exit(CPUState *env) 16963098dba0Saurel32 { 16973098dba0Saurel32 env->exit_request = 1; 16983098dba0Saurel32 cpu_unlink_tb(env); 16993098dba0Saurel32 } 17003098dba0Saurel32 1701c7cd6a37Sblueswir1 const CPULogItem cpu_log_items[] = { 1702f193c797Sbellard { CPU_LOG_TB_OUT_ASM, "out_asm", 1703f193c797Sbellard "show generated host assembly code for each compiled TB" }, 1704f193c797Sbellard { CPU_LOG_TB_IN_ASM, "in_asm", 1705f193c797Sbellard "show target assembly code for each compiled TB" }, 1706f193c797Sbellard { CPU_LOG_TB_OP, "op", 170757fec1feSbellard "show micro ops for each compiled TB" }, 1708f193c797Sbellard { CPU_LOG_TB_OP_OPT, "op_opt", 1709e01a1157Sblueswir1 "show micro ops " 1710e01a1157Sblueswir1 #ifdef TARGET_I386 1711e01a1157Sblueswir1 "before eflags optimization and " 1712f193c797Sbellard #endif 1713e01a1157Sblueswir1 "after liveness analysis" }, 1714f193c797Sbellard { CPU_LOG_INT, "int", 1715f193c797Sbellard "show interrupts/exceptions in short format" }, 1716f193c797Sbellard { CPU_LOG_EXEC, "exec", 1717f193c797Sbellard "show trace before each executed TB (lots of logs)" }, 17189fddaa0cSbellard { CPU_LOG_TB_CPU, "cpu", 1719e91c8a77Sths "show CPU state before block translation" }, 1720f193c797Sbellard #ifdef TARGET_I386 1721f193c797Sbellard { CPU_LOG_PCALL, "pcall", 1722f193c797Sbellard "show protected mode far calls/returns/exceptions" }, 1723eca1bdf4Saliguori { CPU_LOG_RESET, "cpu_reset", 1724eca1bdf4Saliguori "show CPU state before CPU resets" }, 1725f193c797Sbellard #endif 17268e3a9fd2Sbellard #ifdef DEBUG_IOPORT 1727fd872598Sbellard { CPU_LOG_IOPORT, "ioport", 1728fd872598Sbellard "show all i/o ports accesses" }, 17298e3a9fd2Sbellard #endif 1730f193c797Sbellard { 0, NULL, NULL }, 1731f193c797Sbellard }; 1732f193c797Sbellard 1733f6f3fbcaSMichael S. Tsirkin #ifndef CONFIG_USER_ONLY 1734f6f3fbcaSMichael S. Tsirkin static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list 1735f6f3fbcaSMichael S. Tsirkin = QLIST_HEAD_INITIALIZER(memory_client_list); 1736f6f3fbcaSMichael S. Tsirkin 1737f6f3fbcaSMichael S. Tsirkin static void cpu_notify_set_memory(target_phys_addr_t start_addr, 1738f6f3fbcaSMichael S. Tsirkin ram_addr_t size, 17390fd542fbSMichael S. 
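/* Editor's aside (illustrative sketch): how a device model would use the
 * interrupt entry points above.  CPU_INTERRUPT_HARD is assumed to be the
 * usual "external IRQ" bit from cpu-all.h; under TCG the request funnels
 * into tcg_handle_interrupt(), and the target clears it again once the
 * interrupt has been serviced. */
static void example_raise_irq(CPUState *env)
{
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
    /* ... later, when the target has taken the interrupt ... */
    cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
}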
Tsirkin ram_addr_t phys_offset, 17400fd542fbSMichael S. Tsirkin bool log_dirty) 1741f6f3fbcaSMichael S. Tsirkin { 1742f6f3fbcaSMichael S. Tsirkin CPUPhysMemoryClient *client; 1743f6f3fbcaSMichael S. Tsirkin QLIST_FOREACH(client, &memory_client_list, list) { 17440fd542fbSMichael S. Tsirkin client->set_memory(client, start_addr, size, phys_offset, log_dirty); 1745f6f3fbcaSMichael S. Tsirkin } 1746f6f3fbcaSMichael S. Tsirkin } 1747f6f3fbcaSMichael S. Tsirkin 1748f6f3fbcaSMichael S. Tsirkin static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start, 1749f6f3fbcaSMichael S. Tsirkin target_phys_addr_t end) 1750f6f3fbcaSMichael S. Tsirkin { 1751f6f3fbcaSMichael S. Tsirkin CPUPhysMemoryClient *client; 1752f6f3fbcaSMichael S. Tsirkin QLIST_FOREACH(client, &memory_client_list, list) { 1753f6f3fbcaSMichael S. Tsirkin int r = client->sync_dirty_bitmap(client, start, end); 1754f6f3fbcaSMichael S. Tsirkin if (r < 0) 1755f6f3fbcaSMichael S. Tsirkin return r; 1756f6f3fbcaSMichael S. Tsirkin } 1757f6f3fbcaSMichael S. Tsirkin return 0; 1758f6f3fbcaSMichael S. Tsirkin } 1759f6f3fbcaSMichael S. Tsirkin 1760f6f3fbcaSMichael S. Tsirkin static int cpu_notify_migration_log(int enable) 1761f6f3fbcaSMichael S. Tsirkin { 1762f6f3fbcaSMichael S. Tsirkin CPUPhysMemoryClient *client; 1763f6f3fbcaSMichael S. Tsirkin QLIST_FOREACH(client, &memory_client_list, list) { 1764f6f3fbcaSMichael S. Tsirkin int r = client->migration_log(client, enable); 1765f6f3fbcaSMichael S. Tsirkin if (r < 0) 1766f6f3fbcaSMichael S. Tsirkin return r; 1767f6f3fbcaSMichael S. Tsirkin } 1768f6f3fbcaSMichael S. Tsirkin return 0; 1769f6f3fbcaSMichael S. Tsirkin } 1770f6f3fbcaSMichael S. Tsirkin 17712173a75fSAlex Williamson struct last_map { 17722173a75fSAlex Williamson target_phys_addr_t start_addr; 17732173a75fSAlex Williamson ram_addr_t size; 17742173a75fSAlex Williamson ram_addr_t phys_offset; 17752173a75fSAlex Williamson }; 17762173a75fSAlex Williamson 17778d4c78e7SAlex Williamson /* The l1_phys_map provides the upper P_L1_BITs of the guest physical 17788d4c78e7SAlex Williamson * address. Each intermediate table provides the next L2_BITs of guest 17798d4c78e7SAlex Williamson * physical address space. The number of levels vary based on host and 17808d4c78e7SAlex Williamson * guest configuration, making it efficient to build the final guest 17818d4c78e7SAlex Williamson * physical address by seeding the L1 offset and shifting and adding in 17828d4c78e7SAlex Williamson * each L2 offset as we recurse through them. */ 17832173a75fSAlex Williamson static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level, 17842173a75fSAlex Williamson void **lp, target_phys_addr_t addr, 17852173a75fSAlex Williamson struct last_map *map) 1786f6f3fbcaSMichael S. Tsirkin { 17875cd2c5b6SRichard Henderson int i; 1788f6f3fbcaSMichael S. Tsirkin 17895cd2c5b6SRichard Henderson if (*lp == NULL) { 17905cd2c5b6SRichard Henderson return; 1791f6f3fbcaSMichael S. 
Tsirkin } 17925cd2c5b6SRichard Henderson if (level == 0) { 17935cd2c5b6SRichard Henderson PhysPageDesc *pd = *lp; 17948d4c78e7SAlex Williamson addr <<= L2_BITS + TARGET_PAGE_BITS; 17957296abacSPaul Brook for (i = 0; i < L2_SIZE; ++i) { 17965cd2c5b6SRichard Henderson if (pd[i].phys_offset != IO_MEM_UNASSIGNED) { 17972173a75fSAlex Williamson target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS; 17982173a75fSAlex Williamson 17992173a75fSAlex Williamson if (map->size && 18002173a75fSAlex Williamson start_addr == map->start_addr + map->size && 18012173a75fSAlex Williamson pd[i].phys_offset == map->phys_offset + map->size) { 18022173a75fSAlex Williamson 18032173a75fSAlex Williamson map->size += TARGET_PAGE_SIZE; 18042173a75fSAlex Williamson continue; 18052173a75fSAlex Williamson } else if (map->size) { 18062173a75fSAlex Williamson client->set_memory(client, map->start_addr, 18072173a75fSAlex Williamson map->size, map->phys_offset, false); 18082173a75fSAlex Williamson } 18092173a75fSAlex Williamson 18102173a75fSAlex Williamson map->start_addr = start_addr; 18112173a75fSAlex Williamson map->size = TARGET_PAGE_SIZE; 18122173a75fSAlex Williamson map->phys_offset = pd[i].phys_offset; 1813f6f3fbcaSMichael S. Tsirkin } 18145cd2c5b6SRichard Henderson } 18155cd2c5b6SRichard Henderson } else { 18165cd2c5b6SRichard Henderson void **pp = *lp; 18177296abacSPaul Brook for (i = 0; i < L2_SIZE; ++i) { 18188d4c78e7SAlex Williamson phys_page_for_each_1(client, level - 1, pp + i, 18192173a75fSAlex Williamson (addr << L2_BITS) | i, map); 1820f6f3fbcaSMichael S. Tsirkin } 1821f6f3fbcaSMichael S. Tsirkin } 1822f6f3fbcaSMichael S. Tsirkin } 1823f6f3fbcaSMichael S. Tsirkin 1824f6f3fbcaSMichael S. Tsirkin static void phys_page_for_each(CPUPhysMemoryClient *client) 1825f6f3fbcaSMichael S. Tsirkin { 18265cd2c5b6SRichard Henderson int i; 18272173a75fSAlex Williamson struct last_map map = { }; 18282173a75fSAlex Williamson 18295cd2c5b6SRichard Henderson for (i = 0; i < P_L1_SIZE; ++i) { 18305cd2c5b6SRichard Henderson phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1, 18312173a75fSAlex Williamson l1_phys_map + i, i, &map); 18322173a75fSAlex Williamson } 18332173a75fSAlex Williamson if (map.size) { 18342173a75fSAlex Williamson client->set_memory(client, map.start_addr, map.size, map.phys_offset, 18352173a75fSAlex Williamson false); 1836f6f3fbcaSMichael S. Tsirkin } 1837f6f3fbcaSMichael S. Tsirkin } 1838f6f3fbcaSMichael S. Tsirkin 1839f6f3fbcaSMichael S. Tsirkin void cpu_register_phys_memory_client(CPUPhysMemoryClient *client) 1840f6f3fbcaSMichael S. Tsirkin { 1841f6f3fbcaSMichael S. Tsirkin QLIST_INSERT_HEAD(&memory_client_list, client, list); 1842f6f3fbcaSMichael S. Tsirkin phys_page_for_each(client); 1843f6f3fbcaSMichael S. Tsirkin } 1844f6f3fbcaSMichael S. Tsirkin 1845f6f3fbcaSMichael S. Tsirkin void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client) 1846f6f3fbcaSMichael S. Tsirkin { 1847f6f3fbcaSMichael S. Tsirkin QLIST_REMOVE(client, list); 1848f6f3fbcaSMichael S. Tsirkin } 1849f6f3fbcaSMichael S. Tsirkin #endif 1850f6f3fbcaSMichael S. Tsirkin 1851f193c797Sbellard static int cmp1(const char *s1, int n, const char *s2) 1852f193c797Sbellard { 1853f193c797Sbellard if (strlen(s2) != n) 1854f193c797Sbellard return 0; 1855f193c797Sbellard return memcmp(s1, s2, n) == 0; 1856f193c797Sbellard } 1857f193c797Sbellard 1858f193c797Sbellard /* takes a comma separated list of log masks. Return 0 if error. 
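/* Editor's aside (illustrative sketch, not in the original file): wiring up
 * a CPUPhysMemoryClient for the notifier list above.  The callback
 * signatures are inferred from the call sites in this file, the names are
 * hypothetical and the bodies are placeholders. */
static void example_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      bool log_dirty)
{
    /* react to the mapping change, e.g. mirror it into an external table */
}

static int example_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                            target_phys_addr_t start,
                                            target_phys_addr_t end)
{
    return 0;
}

static int example_client_migration_log(CPUPhysMemoryClient *client,
                                        int enable)
{
    return 0;
}

static CPUPhysMemoryClient example_memory_client = {
    .set_memory        = example_client_set_memory,
    .sync_dirty_bitmap = example_client_sync_dirty_bitmap,
    .migration_log     = example_client_migration_log,
};

/* cpu_register_phys_memory_client(&example_memory_client) would then replay
 * every populated physical range through set_memory via
 * phys_page_for_each(). */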
*/ 1859f193c797Sbellard int cpu_str_to_log_mask(const char *str) 1860f193c797Sbellard { 1861c7cd6a37Sblueswir1 const CPULogItem *item; 1862f193c797Sbellard int mask; 1863f193c797Sbellard const char *p, *p1; 1864f193c797Sbellard 1865f193c797Sbellard p = str; 1866f193c797Sbellard mask = 0; 1867f193c797Sbellard for(;;) { 1868f193c797Sbellard p1 = strchr(p, ','); 1869f193c797Sbellard if (!p1) 1870f193c797Sbellard p1 = p + strlen(p); 18718e3a9fd2Sbellard if(cmp1(p,p1-p,"all")) { 18728e3a9fd2Sbellard for(item = cpu_log_items; item->mask != 0; item++) { 18738e3a9fd2Sbellard mask |= item->mask; 18748e3a9fd2Sbellard } 18758e3a9fd2Sbellard } else { 1876f193c797Sbellard for(item = cpu_log_items; item->mask != 0; item++) { 1877f193c797Sbellard if (cmp1(p, p1 - p, item->name)) 1878f193c797Sbellard goto found; 1879f193c797Sbellard } 1880f193c797Sbellard return 0; 18818e3a9fd2Sbellard } 1882f193c797Sbellard found: 1883f193c797Sbellard mask |= item->mask; 1884f193c797Sbellard if (*p1 != ',') 1885f193c797Sbellard break; 1886f193c797Sbellard p = p1 + 1; 1887f193c797Sbellard } 1888f193c797Sbellard return mask; 1889f193c797Sbellard } 1890ea041c0eSbellard 18917501267eSbellard void cpu_abort(CPUState *env, const char *fmt, ...) 18927501267eSbellard { 18937501267eSbellard va_list ap; 1894493ae1f0Spbrook va_list ap2; 18957501267eSbellard 18967501267eSbellard va_start(ap, fmt); 1897493ae1f0Spbrook va_copy(ap2, ap); 18987501267eSbellard fprintf(stderr, "qemu: fatal: "); 18997501267eSbellard vfprintf(stderr, fmt, ap); 19007501267eSbellard fprintf(stderr, "\n"); 19017501267eSbellard #ifdef TARGET_I386 19027fe48483Sbellard cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP); 19037fe48483Sbellard #else 19047fe48483Sbellard cpu_dump_state(env, stderr, fprintf, 0); 19057501267eSbellard #endif 190693fcfe39Saliguori if (qemu_log_enabled()) { 190793fcfe39Saliguori qemu_log("qemu: fatal: "); 190893fcfe39Saliguori qemu_log_vprintf(fmt, ap2); 190993fcfe39Saliguori qemu_log("\n"); 1910f9373291Sj_mayer #ifdef TARGET_I386 191193fcfe39Saliguori log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP); 1912f9373291Sj_mayer #else 191393fcfe39Saliguori log_cpu_state(env, 0); 1914f9373291Sj_mayer #endif 191531b1a7b4Saliguori qemu_log_flush(); 191693fcfe39Saliguori qemu_log_close(); 1917924edcaeSbalrog } 1918493ae1f0Spbrook va_end(ap2); 1919f9373291Sj_mayer va_end(ap); 1920fd052bf6SRiku Voipio #if defined(CONFIG_USER_ONLY) 1921fd052bf6SRiku Voipio { 1922fd052bf6SRiku Voipio struct sigaction act; 1923fd052bf6SRiku Voipio sigfillset(&act.sa_mask); 1924fd052bf6SRiku Voipio act.sa_handler = SIG_DFL; 1925fd052bf6SRiku Voipio sigaction(SIGABRT, &act, NULL); 1926fd052bf6SRiku Voipio } 1927fd052bf6SRiku Voipio #endif 19287501267eSbellard abort(); 19297501267eSbellard } 19307501267eSbellard 1931c5be9f08Sths CPUState *cpu_copy(CPUState *env) 1932c5be9f08Sths { 193301ba9816Sths CPUState *new_env = cpu_init(env->cpu_model_str); 1934c5be9f08Sths CPUState *next_cpu = new_env->next_cpu; 1935c5be9f08Sths int cpu_index = new_env->cpu_index; 19365a38f081Saliguori #if defined(TARGET_HAS_ICE) 19375a38f081Saliguori CPUBreakpoint *bp; 19385a38f081Saliguori CPUWatchpoint *wp; 19395a38f081Saliguori #endif 19405a38f081Saliguori 1941c5be9f08Sths memcpy(new_env, env, sizeof(CPUState)); 19425a38f081Saliguori 19435a38f081Saliguori /* Preserve chaining and index. */ 1944c5be9f08Sths new_env->next_cpu = next_cpu; 1945c5be9f08Sths new_env->cpu_index = cpu_index; 19465a38f081Saliguori 19475a38f081Saliguori /* Clone all break/watchpoints. 
19485a38f081Saliguori Note: Once we support ptrace with hw-debug register access, make sure 19495a38f081Saliguori BP_CPU break/watchpoints are handled correctly on clone. */ 195072cf2d4fSBlue Swirl QTAILQ_INIT(&env->breakpoints); 195172cf2d4fSBlue Swirl QTAILQ_INIT(&env->watchpoints); 19525a38f081Saliguori #if defined(TARGET_HAS_ICE) 195372cf2d4fSBlue Swirl QTAILQ_FOREACH(bp, &env->breakpoints, entry) { 19545a38f081Saliguori cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL); 19555a38f081Saliguori } 195672cf2d4fSBlue Swirl QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 19575a38f081Saliguori cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1, 19585a38f081Saliguori wp->flags, NULL); 19595a38f081Saliguori } 19605a38f081Saliguori #endif 19615a38f081Saliguori 1962c5be9f08Sths return new_env; 1963c5be9f08Sths } 1964c5be9f08Sths 19650124311eSbellard #if !defined(CONFIG_USER_ONLY) 19660124311eSbellard 19675c751e99Sedgar_igl static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr) 19685c751e99Sedgar_igl { 19695c751e99Sedgar_igl unsigned int i; 19705c751e99Sedgar_igl 19715c751e99Sedgar_igl /* Discard jump cache entries for any tb which might potentially 19725c751e99Sedgar_igl overlap the flushed page. */ 19735c751e99Sedgar_igl i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); 19745c751e99Sedgar_igl memset (&env->tb_jmp_cache[i], 0, 19755c751e99Sedgar_igl TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); 19765c751e99Sedgar_igl 19775c751e99Sedgar_igl i = tb_jmp_cache_hash_page(addr); 19785c751e99Sedgar_igl memset (&env->tb_jmp_cache[i], 0, 19795c751e99Sedgar_igl TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); 19805c751e99Sedgar_igl } 19815c751e99Sedgar_igl 198208738984SIgor Kovalenko static CPUTLBEntry s_cputlb_empty_entry = { 198308738984SIgor Kovalenko .addr_read = -1, 198408738984SIgor Kovalenko .addr_write = -1, 198508738984SIgor Kovalenko .addr_code = -1, 198608738984SIgor Kovalenko .addend = -1, 198708738984SIgor Kovalenko }; 198808738984SIgor Kovalenko 1989ee8b7021Sbellard /* NOTE: if flush_global is true, also flush global entries (not 1990ee8b7021Sbellard implemented yet) */ 1991ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global) 199233417e70Sbellard { 199333417e70Sbellard int i; 19940124311eSbellard 19959fa3e853Sbellard #if defined(DEBUG_TLB) 19969fa3e853Sbellard printf("tlb_flush:\n"); 19979fa3e853Sbellard #endif 19980124311eSbellard /* must reset current TB so that interrupts cannot modify the 19990124311eSbellard links while we are modifying them */ 20000124311eSbellard env->current_tb = NULL; 20010124311eSbellard 200233417e70Sbellard for(i = 0; i < CPU_TLB_SIZE; i++) { 2003cfde4bd9SIsaku Yamahata int mmu_idx; 2004cfde4bd9SIsaku Yamahata for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 200508738984SIgor Kovalenko env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry; 2006cfde4bd9SIsaku Yamahata } 200733417e70Sbellard } 20089fa3e853Sbellard 20098a40a180Sbellard memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 20109fa3e853Sbellard 2011d4c430a8SPaul Brook env->tlb_flush_addr = -1; 2012d4c430a8SPaul Brook env->tlb_flush_mask = 0; 2013e3db7226Sbellard tlb_flush_count++; 201433417e70Sbellard } 201533417e70Sbellard 2016274da6b2Sbellard static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) 201761382a50Sbellard { 201884b7b8e7Sbellard if (addr == (tlb_entry->addr_read & 201984b7b8e7Sbellard (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || 202084b7b8e7Sbellard addr == (tlb_entry->addr_write & 202184b7b8e7Sbellard 
(TARGET_PAGE_MASK | TLB_INVALID_MASK)) || 202284b7b8e7Sbellard addr == (tlb_entry->addr_code & 202384b7b8e7Sbellard (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { 202408738984SIgor Kovalenko *tlb_entry = s_cputlb_empty_entry; 202584b7b8e7Sbellard } 202661382a50Sbellard } 202761382a50Sbellard 20282e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr) 202933417e70Sbellard { 20308a40a180Sbellard int i; 2031cfde4bd9SIsaku Yamahata int mmu_idx; 20320124311eSbellard 20339fa3e853Sbellard #if defined(DEBUG_TLB) 2034108c49b8Sbellard printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr); 20359fa3e853Sbellard #endif 2036d4c430a8SPaul Brook /* Check if we need to flush due to large pages. */ 2037d4c430a8SPaul Brook if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { 2038d4c430a8SPaul Brook #if defined(DEBUG_TLB) 2039d4c430a8SPaul Brook printf("tlb_flush_page: forced full flush (" 2040d4c430a8SPaul Brook TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 2041d4c430a8SPaul Brook env->tlb_flush_addr, env->tlb_flush_mask); 2042d4c430a8SPaul Brook #endif 2043d4c430a8SPaul Brook tlb_flush(env, 1); 2044d4c430a8SPaul Brook return; 2045d4c430a8SPaul Brook } 20460124311eSbellard /* must reset current TB so that interrupts cannot modify the 20470124311eSbellard links while we are modifying them */ 20480124311eSbellard env->current_tb = NULL; 204933417e70Sbellard 205061382a50Sbellard addr &= TARGET_PAGE_MASK; 205133417e70Sbellard i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 2052cfde4bd9SIsaku Yamahata for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) 2053cfde4bd9SIsaku Yamahata tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); 20540124311eSbellard 20555c751e99Sedgar_igl tlb_flush_jmp_cache(env, addr); 20569fa3e853Sbellard } 20579fa3e853Sbellard 20589fa3e853Sbellard /* update the TLBs so that writes to code in the virtual page 'addr' 20599fa3e853Sbellard can be detected */ 2060c227f099SAnthony Liguori static void tlb_protect_code(ram_addr_t ram_addr) 206161382a50Sbellard { 20626a00d601Sbellard cpu_physical_memory_reset_dirty(ram_addr, 20636a00d601Sbellard ram_addr + TARGET_PAGE_SIZE, 20646a00d601Sbellard CODE_DIRTY_FLAG); 20659fa3e853Sbellard } 20669fa3e853Sbellard 20679fa3e853Sbellard /* update the TLB so that writes in physical page 'phys_addr' are no longer 20683a7d929eSbellard tested for self modifying code */ 2069c227f099SAnthony Liguori static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 20703a7d929eSbellard target_ulong vaddr) 20719fa3e853Sbellard { 2072f7c11b53SYoshiaki Tamura cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG); 20739fa3e853Sbellard } 20749fa3e853Sbellard 20751ccde1cbSbellard static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 20761ccde1cbSbellard unsigned long start, unsigned long length) 20771ccde1cbSbellard { 20781ccde1cbSbellard unsigned long addr; 207984b7b8e7Sbellard if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { 208084b7b8e7Sbellard addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; 20811ccde1cbSbellard if ((addr - start) < length) { 20820f459d16Spbrook tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY; 20831ccde1cbSbellard } 20841ccde1cbSbellard } 20851ccde1cbSbellard } 20861ccde1cbSbellard 20875579c7f3Spbrook /* Note: start and end must be within the same ram block. 
*/ 2088c227f099SAnthony Liguori void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, 20890a962c02Sbellard int dirty_flags) 20901ccde1cbSbellard { 20911ccde1cbSbellard CPUState *env; 20924f2ac237Sbellard unsigned long length, start1; 2093f7c11b53SYoshiaki Tamura int i; 20941ccde1cbSbellard 20951ccde1cbSbellard start &= TARGET_PAGE_MASK; 20961ccde1cbSbellard end = TARGET_PAGE_ALIGN(end); 20971ccde1cbSbellard 20981ccde1cbSbellard length = end - start; 20991ccde1cbSbellard if (length == 0) 21001ccde1cbSbellard return; 2101f7c11b53SYoshiaki Tamura cpu_physical_memory_mask_dirty_range(start, length, dirty_flags); 2102f23db169Sbellard 21031ccde1cbSbellard /* we modify the TLB cache so that the dirty bit will be set again 21041ccde1cbSbellard when accessing the range */ 2105b2e0a138SMichael S. Tsirkin start1 = (unsigned long)qemu_safe_ram_ptr(start); 2106a57d23e4SStefan Weil /* Check that we don't span multiple blocks - this breaks the 21075579c7f3Spbrook address comparisons below. */ 2108b2e0a138SMichael S. Tsirkin if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1 21095579c7f3Spbrook != (end - 1) - start) { 21105579c7f3Spbrook abort(); 21115579c7f3Spbrook } 21125579c7f3Spbrook 21136a00d601Sbellard for(env = first_cpu; env != NULL; env = env->next_cpu) { 2114cfde4bd9SIsaku Yamahata int mmu_idx; 2115cfde4bd9SIsaku Yamahata for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 21161ccde1cbSbellard for(i = 0; i < CPU_TLB_SIZE; i++) 2117cfde4bd9SIsaku Yamahata tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], 2118cfde4bd9SIsaku Yamahata start1, length); 2119cfde4bd9SIsaku Yamahata } 21206a00d601Sbellard } 21211ccde1cbSbellard } 21221ccde1cbSbellard 212374576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable) 212474576198Saliguori { 2125f6f3fbcaSMichael S. Tsirkin int ret = 0; 212674576198Saliguori in_migration = enable; 2127f6f3fbcaSMichael S. Tsirkin ret = cpu_notify_migration_log(!!enable); 2128f6f3fbcaSMichael S. Tsirkin return ret; 212974576198Saliguori } 213074576198Saliguori 213174576198Saliguori int cpu_physical_memory_get_dirty_tracking(void) 213274576198Saliguori { 213374576198Saliguori return in_migration; 213474576198Saliguori } 213574576198Saliguori 2136c227f099SAnthony Liguori int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, 2137c227f099SAnthony Liguori target_phys_addr_t end_addr) 21382bec46dcSaliguori { 21397b8f3b78SMichael S. Tsirkin int ret; 2140151f7749SJan Kiszka 2141f6f3fbcaSMichael S. 
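/* Editor's aside (illustrative sketch): the order in which a live-migration
 * style loop would use the dirty-tracking helpers above and below.  The
 * range arguments are placeholders, and MIGRATION_DIRTY_FLAG is assumed to
 * be the migration bit from cpu-all.h; CODE_DIRTY_FLAG is deliberately left
 * alone because the SMC machinery above depends on it. */
static void example_dirty_tracking_round(target_phys_addr_t start,
                                         target_phys_addr_t end)
{
    cpu_physical_memory_set_dirty_tracking(1);   /* clients start logging */

    /* ... guest runs; fold the clients' dirty logs into the bitmap: */
    cpu_physical_sync_dirty_bitmap(start, end);

    /* ... copy out dirty pages, then clear only the migration bit: */
    cpu_physical_memory_reset_dirty(start, end, MIGRATION_DIRTY_FLAG);

    cpu_physical_memory_set_dirty_tracking(0);
}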
Tsirkin ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr); 2142151f7749SJan Kiszka return ret; 21432bec46dcSaliguori } 21442bec46dcSaliguori 2145e5896b12SAnthony PERARD int cpu_physical_log_start(target_phys_addr_t start_addr, 2146e5896b12SAnthony PERARD ram_addr_t size) 2147e5896b12SAnthony PERARD { 2148e5896b12SAnthony PERARD CPUPhysMemoryClient *client; 2149e5896b12SAnthony PERARD QLIST_FOREACH(client, &memory_client_list, list) { 2150e5896b12SAnthony PERARD if (client->log_start) { 2151e5896b12SAnthony PERARD int r = client->log_start(client, start_addr, size); 2152e5896b12SAnthony PERARD if (r < 0) { 2153e5896b12SAnthony PERARD return r; 2154e5896b12SAnthony PERARD } 2155e5896b12SAnthony PERARD } 2156e5896b12SAnthony PERARD } 2157e5896b12SAnthony PERARD return 0; 2158e5896b12SAnthony PERARD } 2159e5896b12SAnthony PERARD 2160e5896b12SAnthony PERARD int cpu_physical_log_stop(target_phys_addr_t start_addr, 2161e5896b12SAnthony PERARD ram_addr_t size) 2162e5896b12SAnthony PERARD { 2163e5896b12SAnthony PERARD CPUPhysMemoryClient *client; 2164e5896b12SAnthony PERARD QLIST_FOREACH(client, &memory_client_list, list) { 2165e5896b12SAnthony PERARD if (client->log_stop) { 2166e5896b12SAnthony PERARD int r = client->log_stop(client, start_addr, size); 2167e5896b12SAnthony PERARD if (r < 0) { 2168e5896b12SAnthony PERARD return r; 2169e5896b12SAnthony PERARD } 2170e5896b12SAnthony PERARD } 2171e5896b12SAnthony PERARD } 2172e5896b12SAnthony PERARD return 0; 2173e5896b12SAnthony PERARD } 2174e5896b12SAnthony PERARD 21753a7d929eSbellard static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) 21763a7d929eSbellard { 2177c227f099SAnthony Liguori ram_addr_t ram_addr; 21785579c7f3Spbrook void *p; 21793a7d929eSbellard 218084b7b8e7Sbellard if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { 21815579c7f3Spbrook p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK) 21825579c7f3Spbrook + tlb_entry->addend); 2183e890261fSMarcelo Tosatti ram_addr = qemu_ram_addr_from_host_nofail(p); 21843a7d929eSbellard if (!cpu_physical_memory_is_dirty(ram_addr)) { 21850f459d16Spbrook tlb_entry->addr_write |= TLB_NOTDIRTY; 21863a7d929eSbellard } 21873a7d929eSbellard } 21883a7d929eSbellard } 21893a7d929eSbellard 21903a7d929eSbellard /* update the TLB according to the current state of the dirty bits */ 21913a7d929eSbellard void cpu_tlb_update_dirty(CPUState *env) 21923a7d929eSbellard { 21933a7d929eSbellard int i; 2194cfde4bd9SIsaku Yamahata int mmu_idx; 2195cfde4bd9SIsaku Yamahata for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 21963a7d929eSbellard for(i = 0; i < CPU_TLB_SIZE; i++) 2197cfde4bd9SIsaku Yamahata tlb_update_dirty(&env->tlb_table[mmu_idx][i]); 2198cfde4bd9SIsaku Yamahata } 21993a7d929eSbellard } 22003a7d929eSbellard 22010f459d16Spbrook static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) 22021ccde1cbSbellard { 22030f459d16Spbrook if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) 22040f459d16Spbrook tlb_entry->addr_write = vaddr; 22051ccde1cbSbellard } 22061ccde1cbSbellard 22070f459d16Spbrook /* update the TLB corresponding to virtual page vaddr 22080f459d16Spbrook so that it is no longer dirty */ 22090f459d16Spbrook static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr) 22101ccde1cbSbellard { 22111ccde1cbSbellard int i; 2212cfde4bd9SIsaku Yamahata int mmu_idx; 22131ccde1cbSbellard 22140f459d16Spbrook vaddr &= TARGET_PAGE_MASK; 22151ccde1cbSbellard i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 2216cfde4bd9SIsaku Yamahata for 
(mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) 2217cfde4bd9SIsaku Yamahata tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); 22181ccde1cbSbellard } 22191ccde1cbSbellard 2220d4c430a8SPaul Brook /* Our TLB does not support large pages, so remember the area covered by 2221d4c430a8SPaul Brook large pages and trigger a full TLB flush if these are invalidated. */ 2222d4c430a8SPaul Brook static void tlb_add_large_page(CPUState *env, target_ulong vaddr, 2223d4c430a8SPaul Brook target_ulong size) 2224d4c430a8SPaul Brook { 2225d4c430a8SPaul Brook target_ulong mask = ~(size - 1); 2226d4c430a8SPaul Brook 2227d4c430a8SPaul Brook if (env->tlb_flush_addr == (target_ulong)-1) { 2228d4c430a8SPaul Brook env->tlb_flush_addr = vaddr & mask; 2229d4c430a8SPaul Brook env->tlb_flush_mask = mask; 2230d4c430a8SPaul Brook return; 2231d4c430a8SPaul Brook } 2232d4c430a8SPaul Brook /* Extend the existing region to include the new page. 2233d4c430a8SPaul Brook This is a compromise between unnecessary flushes and the cost 2234d4c430a8SPaul Brook of maintaining a full variable size TLB. */ 2235d4c430a8SPaul Brook mask &= env->tlb_flush_mask; 2236d4c430a8SPaul Brook while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { 2237d4c430a8SPaul Brook mask <<= 1; 2238d4c430a8SPaul Brook } 2239d4c430a8SPaul Brook env->tlb_flush_addr &= mask; 2240d4c430a8SPaul Brook env->tlb_flush_mask = mask; 2241d4c430a8SPaul Brook } 2242d4c430a8SPaul Brook 2243d4c430a8SPaul Brook /* Add a new TLB entry. At most one entry for a given virtual address 2244d4c430a8SPaul Brook is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 2245d4c430a8SPaul Brook supplied size is only used by tlb_flush_page. */ 2246d4c430a8SPaul Brook void tlb_set_page(CPUState *env, target_ulong vaddr, 2247c227f099SAnthony Liguori target_phys_addr_t paddr, int prot, 2248d4c430a8SPaul Brook int mmu_idx, target_ulong size) 22499fa3e853Sbellard { 225092e873b9Sbellard PhysPageDesc *p; 22514f2ac237Sbellard unsigned long pd; 22529fa3e853Sbellard unsigned int index; 22534f2ac237Sbellard target_ulong address; 22540f459d16Spbrook target_ulong code_address; 2255355b1943SPaul Brook unsigned long addend; 225684b7b8e7Sbellard CPUTLBEntry *te; 2257a1d1bb31Saliguori CPUWatchpoint *wp; 2258c227f099SAnthony Liguori target_phys_addr_t iotlb; 22599fa3e853Sbellard 2260d4c430a8SPaul Brook assert(size >= TARGET_PAGE_SIZE); 2261d4c430a8SPaul Brook if (size != TARGET_PAGE_SIZE) { 2262d4c430a8SPaul Brook tlb_add_large_page(env, vaddr, size); 2263d4c430a8SPaul Brook } 226492e873b9Sbellard p = phys_page_find(paddr >> TARGET_PAGE_BITS); 22659fa3e853Sbellard if (!p) { 22669fa3e853Sbellard pd = IO_MEM_UNASSIGNED; 22679fa3e853Sbellard } else { 22689fa3e853Sbellard pd = p->phys_offset; 22699fa3e853Sbellard } 22709fa3e853Sbellard #if defined(DEBUG_TLB) 22717fd3f494SStefan Weil printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx 22727fd3f494SStefan Weil " prot=%x idx=%d pd=0x%08lx\n", 22737fd3f494SStefan Weil vaddr, paddr, prot, mmu_idx, pd); 22749fa3e853Sbellard #endif 22759fa3e853Sbellard 22769fa3e853Sbellard address = vaddr; 22770f459d16Spbrook if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { 22780f459d16Spbrook /* IO memory case (romd handled later) */ 22790f459d16Spbrook address |= TLB_MMIO; 22800f459d16Spbrook } 22815579c7f3Spbrook addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); 22820f459d16Spbrook if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) { 22830f459d16Spbrook /* Normal RAM. 
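/* Editor's worked example for tlb_add_large_page() above (values are
 * illustrative): with an existing tracked region addr 0x40000000, mask
 * 0xffe00000 (one 2 MB page) and a new 2 MB page at vaddr 0x40400000, the
 * widening loop shifts the mask twice (0xffe00000 -> 0xffc00000 ->
 * 0xff800000) until the differing address bit (bit 22) drops out of the
 * mask; the tracked region becomes addr 0x40000000 with mask 0xff800000,
 * i.e. 8 MB, and any tlb_flush_page() falling inside that window now
 * escalates to a full tlb_flush(). */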
*/ 22840f459d16Spbrook iotlb = pd & TARGET_PAGE_MASK; 22850f459d16Spbrook if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM) 22860f459d16Spbrook iotlb |= IO_MEM_NOTDIRTY; 22870f459d16Spbrook else 22880f459d16Spbrook iotlb |= IO_MEM_ROM; 22890f459d16Spbrook } else { 2290ccbb4d44SStuart Brady /* IO handlers are currently passed a physical address. 22910f459d16Spbrook It would be nice to pass an offset from the base address 22920f459d16Spbrook of that region. This would avoid having to special case RAM, 22930f459d16Spbrook and avoid full address decoding in every device. 22940f459d16Spbrook We can't use the high bits of pd for this because 22950f459d16Spbrook IO_MEM_ROMD uses these as a ram address. */ 22968da3ff18Spbrook iotlb = (pd & ~TARGET_PAGE_MASK); 22978da3ff18Spbrook if (p) { 22988da3ff18Spbrook iotlb += p->region_offset; 22998da3ff18Spbrook } else { 23008da3ff18Spbrook iotlb += paddr; 23018da3ff18Spbrook } 23029fa3e853Sbellard } 23039fa3e853Sbellard 23040f459d16Spbrook code_address = address; 23056658ffb8Spbrook /* Make accesses to pages with watchpoints go via the 23066658ffb8Spbrook watchpoint trap routines. */ 230772cf2d4fSBlue Swirl QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 2308a1d1bb31Saliguori if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { 2309bf298f83SJun Koi /* Avoid trapping reads of pages with a write breakpoint. */ 2310bf298f83SJun Koi if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { 23110f459d16Spbrook iotlb = io_mem_watch + paddr; 23120f459d16Spbrook address |= TLB_MMIO; 2313bf298f83SJun Koi break; 2314bf298f83SJun Koi } 23156658ffb8Spbrook } 23166658ffb8Spbrook } 23176658ffb8Spbrook 231890f18422Sbellard index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 23190f459d16Spbrook env->iotlb[mmu_idx][index] = iotlb - vaddr; 23206ebbf390Sj_mayer te = &env->tlb_table[mmu_idx][index]; 23210f459d16Spbrook te->addend = addend - vaddr; 232267b915a5Sbellard if (prot & PAGE_READ) { 232384b7b8e7Sbellard te->addr_read = address; 23249fa3e853Sbellard } else { 232584b7b8e7Sbellard te->addr_read = -1; 232684b7b8e7Sbellard } 23275c751e99Sedgar_igl 232884b7b8e7Sbellard if (prot & PAGE_EXEC) { 23290f459d16Spbrook te->addr_code = code_address; 233084b7b8e7Sbellard } else { 233184b7b8e7Sbellard te->addr_code = -1; 23329fa3e853Sbellard } 233367b915a5Sbellard if (prot & PAGE_WRITE) { 2334856074ecSbellard if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 2335856074ecSbellard (pd & IO_MEM_ROMD)) { 23360f459d16Spbrook /* Write access calls the I/O callback. */ 23370f459d16Spbrook te->addr_write = address | TLB_MMIO; 23383a7d929eSbellard } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 23391ccde1cbSbellard !cpu_physical_memory_is_dirty(pd)) { 23400f459d16Spbrook te->addr_write = address | TLB_NOTDIRTY; 23419fa3e853Sbellard } else { 234284b7b8e7Sbellard te->addr_write = address; 23439fa3e853Sbellard } 23449fa3e853Sbellard } else { 234584b7b8e7Sbellard te->addr_write = -1; 23469fa3e853Sbellard } 23479fa3e853Sbellard } 23489fa3e853Sbellard 23490124311eSbellard #else 23500124311eSbellard 2351ee8b7021Sbellard void tlb_flush(CPUState *env, int flush_global) 23520124311eSbellard { 23530124311eSbellard } 23540124311eSbellard 23552e12669aSbellard void tlb_flush_page(CPUState *env, target_ulong addr) 23560124311eSbellard { 23570124311eSbellard } 23580124311eSbellard 2359edf8e2afSMika Westerberg /* 2360edf8e2afSMika Westerberg * Walks guest process memory "regions" one by one 2361edf8e2afSMika Westerberg * and calls callback function 'fn' for each region. 
2362edf8e2afSMika Westerberg */ 23635cd2c5b6SRichard Henderson 23645cd2c5b6SRichard Henderson struct walk_memory_regions_data 236533417e70Sbellard { 23665cd2c5b6SRichard Henderson walk_memory_regions_fn fn; 23675cd2c5b6SRichard Henderson void *priv; 23685cd2c5b6SRichard Henderson unsigned long start; 23695cd2c5b6SRichard Henderson int prot; 23705cd2c5b6SRichard Henderson }; 23719fa3e853Sbellard 23725cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data, 2373b480d9b7SPaul Brook abi_ulong end, int new_prot) 23745cd2c5b6SRichard Henderson { 23755cd2c5b6SRichard Henderson if (data->start != -1ul) { 23765cd2c5b6SRichard Henderson int rc = data->fn(data->priv, data->start, end, data->prot); 23775cd2c5b6SRichard Henderson if (rc != 0) { 23785cd2c5b6SRichard Henderson return rc; 23795cd2c5b6SRichard Henderson } 23805cd2c5b6SRichard Henderson } 2381edf8e2afSMika Westerberg 23825cd2c5b6SRichard Henderson data->start = (new_prot ? end : -1ul); 23835cd2c5b6SRichard Henderson data->prot = new_prot; 23845cd2c5b6SRichard Henderson 23855cd2c5b6SRichard Henderson return 0; 238633417e70Sbellard } 23875cd2c5b6SRichard Henderson 23885cd2c5b6SRichard Henderson static int walk_memory_regions_1(struct walk_memory_regions_data *data, 2389b480d9b7SPaul Brook abi_ulong base, int level, void **lp) 23905cd2c5b6SRichard Henderson { 2391b480d9b7SPaul Brook abi_ulong pa; 23925cd2c5b6SRichard Henderson int i, rc; 23935cd2c5b6SRichard Henderson 23945cd2c5b6SRichard Henderson if (*lp == NULL) { 23955cd2c5b6SRichard Henderson return walk_memory_regions_end(data, base, 0); 23969fa3e853Sbellard } 23975cd2c5b6SRichard Henderson 23985cd2c5b6SRichard Henderson if (level == 0) { 23995cd2c5b6SRichard Henderson PageDesc *pd = *lp; 24007296abacSPaul Brook for (i = 0; i < L2_SIZE; ++i) { 24015cd2c5b6SRichard Henderson int prot = pd[i].flags; 24025cd2c5b6SRichard Henderson 24035cd2c5b6SRichard Henderson pa = base | (i << TARGET_PAGE_BITS); 24045cd2c5b6SRichard Henderson if (prot != data->prot) { 24055cd2c5b6SRichard Henderson rc = walk_memory_regions_end(data, pa, prot); 24065cd2c5b6SRichard Henderson if (rc != 0) { 24075cd2c5b6SRichard Henderson return rc; 24089fa3e853Sbellard } 24099fa3e853Sbellard } 24105cd2c5b6SRichard Henderson } 24115cd2c5b6SRichard Henderson } else { 24125cd2c5b6SRichard Henderson void **pp = *lp; 24137296abacSPaul Brook for (i = 0; i < L2_SIZE; ++i) { 2414b480d9b7SPaul Brook pa = base | ((abi_ulong)i << 2415b480d9b7SPaul Brook (TARGET_PAGE_BITS + L2_BITS * level)); 24165cd2c5b6SRichard Henderson rc = walk_memory_regions_1(data, pa, level - 1, pp + i); 24175cd2c5b6SRichard Henderson if (rc != 0) { 24185cd2c5b6SRichard Henderson return rc; 24195cd2c5b6SRichard Henderson } 24205cd2c5b6SRichard Henderson } 24215cd2c5b6SRichard Henderson } 24225cd2c5b6SRichard Henderson 24235cd2c5b6SRichard Henderson return 0; 24245cd2c5b6SRichard Henderson } 24255cd2c5b6SRichard Henderson 24265cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn) 24275cd2c5b6SRichard Henderson { 24285cd2c5b6SRichard Henderson struct walk_memory_regions_data data; 24295cd2c5b6SRichard Henderson unsigned long i; 24305cd2c5b6SRichard Henderson 24315cd2c5b6SRichard Henderson data.fn = fn; 24325cd2c5b6SRichard Henderson data.priv = priv; 24335cd2c5b6SRichard Henderson data.start = -1ul; 24345cd2c5b6SRichard Henderson data.prot = 0; 24355cd2c5b6SRichard Henderson 24365cd2c5b6SRichard Henderson for (i = 0; i < V_L1_SIZE; i++) { 2437b480d9b7SPaul Brook int rc = 
walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT, 24385cd2c5b6SRichard Henderson V_L1_SHIFT / L2_BITS - 1, l1_map + i); 24395cd2c5b6SRichard Henderson if (rc != 0) { 24405cd2c5b6SRichard Henderson return rc; 24415cd2c5b6SRichard Henderson } 24425cd2c5b6SRichard Henderson } 24435cd2c5b6SRichard Henderson 24445cd2c5b6SRichard Henderson return walk_memory_regions_end(&data, 0, 0); 2445edf8e2afSMika Westerberg } 2446edf8e2afSMika Westerberg 2447b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start, 2448b480d9b7SPaul Brook abi_ulong end, unsigned long prot) 2449edf8e2afSMika Westerberg { 2450edf8e2afSMika Westerberg FILE *f = (FILE *)priv; 2451edf8e2afSMika Westerberg 2452b480d9b7SPaul Brook (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx 2453b480d9b7SPaul Brook " "TARGET_ABI_FMT_lx" %c%c%c\n", 2454edf8e2afSMika Westerberg start, end, end - start, 2455edf8e2afSMika Westerberg ((prot & PAGE_READ) ? 'r' : '-'), 2456edf8e2afSMika Westerberg ((prot & PAGE_WRITE) ? 'w' : '-'), 2457edf8e2afSMika Westerberg ((prot & PAGE_EXEC) ? 'x' : '-')); 2458edf8e2afSMika Westerberg 2459edf8e2afSMika Westerberg return (0); 2460edf8e2afSMika Westerberg } 2461edf8e2afSMika Westerberg 2462edf8e2afSMika Westerberg /* dump memory mappings */ 2463edf8e2afSMika Westerberg void page_dump(FILE *f) 2464edf8e2afSMika Westerberg { 2465edf8e2afSMika Westerberg (void) fprintf(f, "%-8s %-8s %-8s %s\n", 2466edf8e2afSMika Westerberg "start", "end", "size", "prot"); 2467edf8e2afSMika Westerberg walk_memory_regions(f, dump_region); 24689fa3e853Sbellard } 24699fa3e853Sbellard 247053a5960aSpbrook int page_get_flags(target_ulong address) 24719fa3e853Sbellard { 24729fa3e853Sbellard PageDesc *p; 24739fa3e853Sbellard 24749fa3e853Sbellard p = page_find(address >> TARGET_PAGE_BITS); 24759fa3e853Sbellard if (!p) 24769fa3e853Sbellard return 0; 24779fa3e853Sbellard return p->flags; 24789fa3e853Sbellard } 24799fa3e853Sbellard 2480376a7909SRichard Henderson /* Modify the flags of a page and invalidate the code if necessary. 2481376a7909SRichard Henderson The flag PAGE_WRITE_ORG is positioned automatically depending 2482376a7909SRichard Henderson on PAGE_WRITE. The mmap_lock should already be held. */ 248353a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags) 24849fa3e853Sbellard { 2485376a7909SRichard Henderson target_ulong addr, len; 24869fa3e853Sbellard 2487376a7909SRichard Henderson /* This function should never be called with addresses outside the 2488376a7909SRichard Henderson guest address space. If this assert fires, it probably indicates 2489376a7909SRichard Henderson a missing call to h2g_valid. 
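/* Editor's aside (illustrative sketch): how a user-mode mmap()-style path
 * would use the page-flag helpers above and below.  The helper name and the
 * range are placeholders; start/len are assumed page-aligned and
 * mmap_lock() is expected to be held, as the comment above page_set_flags()
 * notes. */
static void example_map_and_check(target_ulong start, target_ulong len)
{
    /* declare [start, start + len) as a valid, writable guest mapping */
    page_set_flags(start, start + len, PAGE_VALID | PAGE_READ | PAGE_WRITE);

    /* before writing into guest memory, confirm it is still writable;
       page_check_range() below also re-enables PAGE_WRITE on pages that
       were made read-only to protect translated code */
    if (page_check_range(start, len, PAGE_WRITE) < 0) {
        /* region is not mapped writable */
    }
}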
*/ 2490b480d9b7SPaul Brook #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2491b480d9b7SPaul Brook assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2492376a7909SRichard Henderson #endif 2493376a7909SRichard Henderson assert(start < end); 2494376a7909SRichard Henderson 24959fa3e853Sbellard start = start & TARGET_PAGE_MASK; 24969fa3e853Sbellard end = TARGET_PAGE_ALIGN(end); 2497376a7909SRichard Henderson 2498376a7909SRichard Henderson if (flags & PAGE_WRITE) { 24999fa3e853Sbellard flags |= PAGE_WRITE_ORG; 2500376a7909SRichard Henderson } 2501376a7909SRichard Henderson 2502376a7909SRichard Henderson for (addr = start, len = end - start; 2503376a7909SRichard Henderson len != 0; 2504376a7909SRichard Henderson len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2505376a7909SRichard Henderson PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 2506376a7909SRichard Henderson 2507376a7909SRichard Henderson /* If the write protection bit is set, then we invalidate 2508376a7909SRichard Henderson the code inside. */ 25099fa3e853Sbellard if (!(p->flags & PAGE_WRITE) && 25109fa3e853Sbellard (flags & PAGE_WRITE) && 25119fa3e853Sbellard p->first_tb) { 2512d720b93dSbellard tb_invalidate_phys_page(addr, 0, NULL); 25139fa3e853Sbellard } 25149fa3e853Sbellard p->flags = flags; 25159fa3e853Sbellard } 25169fa3e853Sbellard } 25179fa3e853Sbellard 25183d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags) 25193d97b40bSths { 25203d97b40bSths PageDesc *p; 25213d97b40bSths target_ulong end; 25223d97b40bSths target_ulong addr; 25233d97b40bSths 2524376a7909SRichard Henderson /* This function should never be called with addresses outside the 2525376a7909SRichard Henderson guest address space. If this assert fires, it probably indicates 2526376a7909SRichard Henderson a missing call to h2g_valid. */ 2527338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2528338e9e6cSBlue Swirl assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2529376a7909SRichard Henderson #endif 2530376a7909SRichard Henderson 25313e0650a9SRichard Henderson if (len == 0) { 25323e0650a9SRichard Henderson return 0; 25333e0650a9SRichard Henderson } 2534376a7909SRichard Henderson if (start + len - 1 < start) { 2535376a7909SRichard Henderson /* We've wrapped around. 
*/
253655f280c9Sbalrog return -1;
2537376a7909SRichard Henderson }
253855f280c9Sbalrog
25393d97b40bSths end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
25403d97b40bSths start = start & TARGET_PAGE_MASK;
25413d97b40bSths
2542376a7909SRichard Henderson for (addr = start, len = end - start;
2543376a7909SRichard Henderson len != 0;
2544376a7909SRichard Henderson len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
25453d97b40bSths p = page_find(addr >> TARGET_PAGE_BITS);
25463d97b40bSths if( !p )
25473d97b40bSths return -1;
25483d97b40bSths if( !(p->flags & PAGE_VALID) )
25493d97b40bSths return -1;
25503d97b40bSths
2551dae3270cSbellard if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
25523d97b40bSths return -1;
2553dae3270cSbellard if (flags & PAGE_WRITE) {
2554dae3270cSbellard if (!(p->flags & PAGE_WRITE_ORG))
25553d97b40bSths return -1;
2556dae3270cSbellard /* unprotect the page if it was put read-only because it
2557dae3270cSbellard contains translated code */
2558dae3270cSbellard if (!(p->flags & PAGE_WRITE)) {
2559dae3270cSbellard if (!page_unprotect(addr, 0, NULL))
2560dae3270cSbellard return -1;
2561dae3270cSbellard }
2562dae3270cSbellard return 0;
2563dae3270cSbellard }
25643d97b40bSths }
25653d97b40bSths return 0;
25663d97b40bSths }
25673d97b40bSths
25689fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
2569ccbb4d44SStuart Brady page. Return TRUE if the fault was successfully handled. */
257053a5960aSpbrook int page_unprotect(target_ulong address, unsigned long pc, void *puc)
25719fa3e853Sbellard {
257245d679d6SAurelien Jarno unsigned int prot;
257345d679d6SAurelien Jarno PageDesc *p;
257453a5960aSpbrook target_ulong host_start, host_end, addr;
25759fa3e853Sbellard
2576c8a706feSpbrook /* Technically this isn't safe inside a signal handler. However we
2577c8a706feSpbrook know this only ever happens in a synchronous SEGV handler, so in
2578c8a706feSpbrook practice it seems to be ok. */
2579c8a706feSpbrook mmap_lock();
2580c8a706feSpbrook
258145d679d6SAurelien Jarno p = page_find(address >> TARGET_PAGE_BITS);
258245d679d6SAurelien Jarno if (!p) {
2583c8a706feSpbrook mmap_unlock();
25849fa3e853Sbellard return 0;
2585c8a706feSpbrook }
258645d679d6SAurelien Jarno
25879fa3e853Sbellard /* if the page was really writable, then we change its
25889fa3e853Sbellard protection back to writable */
258945d679d6SAurelien Jarno if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
259045d679d6SAurelien Jarno host_start = address & qemu_host_page_mask;
259145d679d6SAurelien Jarno host_end = host_start + qemu_host_page_size;
259245d679d6SAurelien Jarno
259345d679d6SAurelien Jarno prot = 0;
259445d679d6SAurelien Jarno for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
259545d679d6SAurelien Jarno p = page_find(addr >> TARGET_PAGE_BITS);
259645d679d6SAurelien Jarno p->flags |= PAGE_WRITE;
259745d679d6SAurelien Jarno prot |= p->flags;
259845d679d6SAurelien Jarno
25999fa3e853Sbellard /* and since the content will be modified, we must invalidate
26009fa3e853Sbellard the corresponding translated code.
*/ 260145d679d6SAurelien Jarno tb_invalidate_phys_page(addr, pc, puc); 26029fa3e853Sbellard #ifdef DEBUG_TB_CHECK 260345d679d6SAurelien Jarno tb_invalidate_check(addr); 26049fa3e853Sbellard #endif 260545d679d6SAurelien Jarno } 260645d679d6SAurelien Jarno mprotect((void *)g2h(host_start), qemu_host_page_size, 260745d679d6SAurelien Jarno prot & PAGE_BITS); 260845d679d6SAurelien Jarno 2609c8a706feSpbrook mmap_unlock(); 26109fa3e853Sbellard return 1; 26119fa3e853Sbellard } 2612c8a706feSpbrook mmap_unlock(); 26139fa3e853Sbellard return 0; 26149fa3e853Sbellard } 26159fa3e853Sbellard 26166a00d601Sbellard static inline void tlb_set_dirty(CPUState *env, 26176a00d601Sbellard unsigned long addr, target_ulong vaddr) 26181ccde1cbSbellard { 26191ccde1cbSbellard } 26209fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */ 262133417e70Sbellard 2622e2eef170Spbrook #if !defined(CONFIG_USER_ONLY) 26238da3ff18Spbrook 2624c04b2b78SPaul Brook #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) 2625c04b2b78SPaul Brook typedef struct subpage_t { 2626c04b2b78SPaul Brook target_phys_addr_t base; 2627f6405247SRichard Henderson ram_addr_t sub_io_index[TARGET_PAGE_SIZE]; 2628f6405247SRichard Henderson ram_addr_t region_offset[TARGET_PAGE_SIZE]; 2629c04b2b78SPaul Brook } subpage_t; 2630c04b2b78SPaul Brook 2631c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 2632c227f099SAnthony Liguori ram_addr_t memory, ram_addr_t region_offset); 2633f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 2634f6405247SRichard Henderson ram_addr_t orig_memory, 2635f6405247SRichard Henderson ram_addr_t region_offset); 2636db7b5426Sblueswir1 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \ 2637db7b5426Sblueswir1 need_subpage) \ 2638db7b5426Sblueswir1 do { \ 2639db7b5426Sblueswir1 if (addr > start_addr) \ 2640db7b5426Sblueswir1 start_addr2 = 0; \ 2641db7b5426Sblueswir1 else { \ 2642db7b5426Sblueswir1 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \ 2643db7b5426Sblueswir1 if (start_addr2 > 0) \ 2644db7b5426Sblueswir1 need_subpage = 1; \ 2645db7b5426Sblueswir1 } \ 2646db7b5426Sblueswir1 \ 264749e9fba2Sblueswir1 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \ 2648db7b5426Sblueswir1 end_addr2 = TARGET_PAGE_SIZE - 1; \ 2649db7b5426Sblueswir1 else { \ 2650db7b5426Sblueswir1 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \ 2651db7b5426Sblueswir1 if (end_addr2 < TARGET_PAGE_SIZE - 1) \ 2652db7b5426Sblueswir1 need_subpage = 1; \ 2653db7b5426Sblueswir1 } \ 2654db7b5426Sblueswir1 } while (0) 2655db7b5426Sblueswir1 26568f2498f9SMichael S. Tsirkin /* register physical memory. 26578f2498f9SMichael S. Tsirkin For RAM, 'size' must be a multiple of the target page size. 26588f2498f9SMichael S. Tsirkin If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an 26598da3ff18Spbrook io memory page. The address used when calling the IO function is 26608da3ff18Spbrook the offset from the start of the region, plus region_offset. Both 2661ccbb4d44SStuart Brady start_addr and region_offset are rounded down to a page boundary 26628da3ff18Spbrook before calculating this offset. This should not be a problem unless 26638da3ff18Spbrook the low bits of start_addr and region_offset differ. */ 26640fd542fbSMichael S. Tsirkin void cpu_register_physical_memory_log(target_phys_addr_t start_addr, 2665c227f099SAnthony Liguori ram_addr_t size, 2666c227f099SAnthony Liguori ram_addr_t phys_offset, 26670fd542fbSMichael S. 
Tsirkin ram_addr_t region_offset, 26680fd542fbSMichael S. Tsirkin bool log_dirty) 266933417e70Sbellard { 2670c227f099SAnthony Liguori target_phys_addr_t addr, end_addr; 267192e873b9Sbellard PhysPageDesc *p; 26729d42037bSbellard CPUState *env; 2673c227f099SAnthony Liguori ram_addr_t orig_size = size; 2674f6405247SRichard Henderson subpage_t *subpage; 267533417e70Sbellard 26763b8e6a2dSEdgar E. Iglesias assert(size); 26770fd542fbSMichael S. Tsirkin cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty); 2678f6f3fbcaSMichael S. Tsirkin 267967c4d23cSpbrook if (phys_offset == IO_MEM_UNASSIGNED) { 268067c4d23cSpbrook region_offset = start_addr; 268167c4d23cSpbrook } 26828da3ff18Spbrook region_offset &= TARGET_PAGE_MASK; 26835fd386f6Sbellard size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; 2684c227f099SAnthony Liguori end_addr = start_addr + (target_phys_addr_t)size; 26853b8e6a2dSEdgar E. Iglesias 26863b8e6a2dSEdgar E. Iglesias addr = start_addr; 26873b8e6a2dSEdgar E. Iglesias do { 2688db7b5426Sblueswir1 p = phys_page_find(addr >> TARGET_PAGE_BITS); 2689db7b5426Sblueswir1 if (p && p->phys_offset != IO_MEM_UNASSIGNED) { 2690c227f099SAnthony Liguori ram_addr_t orig_memory = p->phys_offset; 2691c227f099SAnthony Liguori target_phys_addr_t start_addr2, end_addr2; 2692db7b5426Sblueswir1 int need_subpage = 0; 2693db7b5426Sblueswir1 2694db7b5426Sblueswir1 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, 2695db7b5426Sblueswir1 need_subpage); 2696f6405247SRichard Henderson if (need_subpage) { 2697db7b5426Sblueswir1 if (!(orig_memory & IO_MEM_SUBPAGE)) { 2698db7b5426Sblueswir1 subpage = subpage_init((addr & TARGET_PAGE_MASK), 26998da3ff18Spbrook &p->phys_offset, orig_memory, 27008da3ff18Spbrook p->region_offset); 2701db7b5426Sblueswir1 } else { 2702db7b5426Sblueswir1 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK) 2703db7b5426Sblueswir1 >> IO_MEM_SHIFT]; 2704db7b5426Sblueswir1 } 27058da3ff18Spbrook subpage_register(subpage, start_addr2, end_addr2, phys_offset, 27068da3ff18Spbrook region_offset); 27078da3ff18Spbrook p->region_offset = 0; 2708db7b5426Sblueswir1 } else { 2709db7b5426Sblueswir1 p->phys_offset = phys_offset; 2710db7b5426Sblueswir1 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || 2711db7b5426Sblueswir1 (phys_offset & IO_MEM_ROMD)) 2712db7b5426Sblueswir1 phys_offset += TARGET_PAGE_SIZE; 2713db7b5426Sblueswir1 } 2714db7b5426Sblueswir1 } else { 2715108c49b8Sbellard p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 27169fa3e853Sbellard p->phys_offset = phys_offset; 27178da3ff18Spbrook p->region_offset = region_offset; 27182a4188a3Sbellard if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || 27198da3ff18Spbrook (phys_offset & IO_MEM_ROMD)) { 272033417e70Sbellard phys_offset += TARGET_PAGE_SIZE; 27218da3ff18Spbrook } else { 2722c227f099SAnthony Liguori target_phys_addr_t start_addr2, end_addr2; 2723db7b5426Sblueswir1 int need_subpage = 0; 2724db7b5426Sblueswir1 2725db7b5426Sblueswir1 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, 2726db7b5426Sblueswir1 end_addr2, need_subpage); 2727db7b5426Sblueswir1 2728f6405247SRichard Henderson if (need_subpage) { 2729db7b5426Sblueswir1 subpage = subpage_init((addr & TARGET_PAGE_MASK), 27308da3ff18Spbrook &p->phys_offset, IO_MEM_UNASSIGNED, 273167c4d23cSpbrook addr & TARGET_PAGE_MASK); 2732db7b5426Sblueswir1 subpage_register(subpage, start_addr2, end_addr2, 27338da3ff18Spbrook phys_offset, region_offset); 27348da3ff18Spbrook p->region_offset = 0; 2735db7b5426Sblueswir1 } 2736db7b5426Sblueswir1 } 
2737db7b5426Sblueswir1 } 27388da3ff18Spbrook region_offset += TARGET_PAGE_SIZE; 27393b8e6a2dSEdgar E. Iglesias addr += TARGET_PAGE_SIZE; 27403b8e6a2dSEdgar E. Iglesias } while (addr != end_addr); 27419d42037bSbellard 27429d42037bSbellard /* since each CPU stores ram addresses in its TLB cache, we must 27439d42037bSbellard reset the modified entries */ 27449d42037bSbellard /* XXX: slow ! */ 27459d42037bSbellard for(env = first_cpu; env != NULL; env = env->next_cpu) { 27469d42037bSbellard tlb_flush(env, 1); 27479d42037bSbellard } 274833417e70Sbellard } 274933417e70Sbellard 2750ba863458Sbellard /* XXX: temporary until new memory mapping API */ 2751c227f099SAnthony Liguori ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr) 2752ba863458Sbellard { 2753ba863458Sbellard PhysPageDesc *p; 2754ba863458Sbellard 2755ba863458Sbellard p = phys_page_find(addr >> TARGET_PAGE_BITS); 2756ba863458Sbellard if (!p) 2757ba863458Sbellard return IO_MEM_UNASSIGNED; 2758ba863458Sbellard return p->phys_offset; 2759ba863458Sbellard } 2760ba863458Sbellard 2761c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) 2762f65ed4c1Saliguori { 2763f65ed4c1Saliguori if (kvm_enabled()) 2764f65ed4c1Saliguori kvm_coalesce_mmio_region(addr, size); 2765f65ed4c1Saliguori } 2766f65ed4c1Saliguori 2767c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) 2768f65ed4c1Saliguori { 2769f65ed4c1Saliguori if (kvm_enabled()) 2770f65ed4c1Saliguori kvm_uncoalesce_mmio_region(addr, size); 2771f65ed4c1Saliguori } 2772f65ed4c1Saliguori 277362a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void) 277462a2744cSSheng Yang { 277562a2744cSSheng Yang if (kvm_enabled()) 277662a2744cSSheng Yang kvm_flush_coalesced_mmio_buffer(); 277762a2744cSSheng Yang } 277862a2744cSSheng Yang 2779c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X) 2780c902760fSMarcelo Tosatti 2781c902760fSMarcelo Tosatti #include <sys/vfs.h> 2782c902760fSMarcelo Tosatti 2783c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC 0x958458f6 2784c902760fSMarcelo Tosatti 2785c902760fSMarcelo Tosatti static long gethugepagesize(const char *path) 2786c902760fSMarcelo Tosatti { 2787c902760fSMarcelo Tosatti struct statfs fs; 2788c902760fSMarcelo Tosatti int ret; 2789c902760fSMarcelo Tosatti 2790c902760fSMarcelo Tosatti do { 2791c902760fSMarcelo Tosatti ret = statfs(path, &fs); 2792c902760fSMarcelo Tosatti } while (ret != 0 && errno == EINTR); 2793c902760fSMarcelo Tosatti 2794c902760fSMarcelo Tosatti if (ret != 0) { 27956adc0549SMichael Tokarev perror(path); 2796c902760fSMarcelo Tosatti return 0; 2797c902760fSMarcelo Tosatti } 2798c902760fSMarcelo Tosatti 2799c902760fSMarcelo Tosatti if (fs.f_type != HUGETLBFS_MAGIC) 2800c902760fSMarcelo Tosatti fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path); 2801c902760fSMarcelo Tosatti 2802c902760fSMarcelo Tosatti return fs.f_bsize; 2803c902760fSMarcelo Tosatti } 2804c902760fSMarcelo Tosatti 280504b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block, 280604b16653SAlex Williamson ram_addr_t memory, 280704b16653SAlex Williamson const char *path) 2808c902760fSMarcelo Tosatti { 2809c902760fSMarcelo Tosatti char *filename; 2810c902760fSMarcelo Tosatti void *area; 2811c902760fSMarcelo Tosatti int fd; 2812c902760fSMarcelo Tosatti #ifdef MAP_POPULATE 2813c902760fSMarcelo Tosatti int flags; 2814c902760fSMarcelo Tosatti #endif 2815c902760fSMarcelo Tosatti unsigned long hpagesize; 2816c902760fSMarcelo Tosatti 
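/* Editorial note on the hugetlbfs-backed allocation that follows: probe the
 * huge page size of 'path' via statfs(), create and immediately unlink a
 * temporary backing file there, round the requested size up to a huge page
 * multiple, ftruncate() the file (errors tolerated on old hosts), and mmap()
 * it (MAP_POPULATE | MAP_SHARED when mem_prealloc is set, MAP_PRIVATE
 * otherwise). */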
2817c902760fSMarcelo Tosatti hpagesize = gethugepagesize(path); 2818c902760fSMarcelo Tosatti if (!hpagesize) { 2819c902760fSMarcelo Tosatti return NULL; 2820c902760fSMarcelo Tosatti } 2821c902760fSMarcelo Tosatti 2822c902760fSMarcelo Tosatti if (memory < hpagesize) { 2823c902760fSMarcelo Tosatti return NULL; 2824c902760fSMarcelo Tosatti } 2825c902760fSMarcelo Tosatti 2826c902760fSMarcelo Tosatti if (kvm_enabled() && !kvm_has_sync_mmu()) { 2827c902760fSMarcelo Tosatti fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n"); 2828c902760fSMarcelo Tosatti return NULL; 2829c902760fSMarcelo Tosatti } 2830c902760fSMarcelo Tosatti 2831c902760fSMarcelo Tosatti if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) { 2832c902760fSMarcelo Tosatti return NULL; 2833c902760fSMarcelo Tosatti } 2834c902760fSMarcelo Tosatti 2835c902760fSMarcelo Tosatti fd = mkstemp(filename); 2836c902760fSMarcelo Tosatti if (fd < 0) { 28376adc0549SMichael Tokarev perror("unable to create backing store for hugepages"); 2838c902760fSMarcelo Tosatti free(filename); 2839c902760fSMarcelo Tosatti return NULL; 2840c902760fSMarcelo Tosatti } 2841c902760fSMarcelo Tosatti unlink(filename); 2842c902760fSMarcelo Tosatti free(filename); 2843c902760fSMarcelo Tosatti 2844c902760fSMarcelo Tosatti memory = (memory+hpagesize-1) & ~(hpagesize-1); 2845c902760fSMarcelo Tosatti 2846c902760fSMarcelo Tosatti /* 2847c902760fSMarcelo Tosatti * ftruncate is not supported by hugetlbfs in older 2848c902760fSMarcelo Tosatti * hosts, so don't bother bailing out on errors. 2849c902760fSMarcelo Tosatti * If anything goes wrong with it under other filesystems, 2850c902760fSMarcelo Tosatti * mmap will fail. 2851c902760fSMarcelo Tosatti */ 2852c902760fSMarcelo Tosatti if (ftruncate(fd, memory)) 2853c902760fSMarcelo Tosatti perror("ftruncate"); 2854c902760fSMarcelo Tosatti 2855c902760fSMarcelo Tosatti #ifdef MAP_POPULATE 2856c902760fSMarcelo Tosatti /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case 2857c902760fSMarcelo Tosatti * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED 2858c902760fSMarcelo Tosatti * to sidestep this quirk. 2859c902760fSMarcelo Tosatti */ 2860c902760fSMarcelo Tosatti flags = mem_prealloc ? 
MAP_POPULATE | MAP_SHARED : MAP_PRIVATE; 2861c902760fSMarcelo Tosatti area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0); 2862c902760fSMarcelo Tosatti #else 2863c902760fSMarcelo Tosatti area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); 2864c902760fSMarcelo Tosatti #endif 2865c902760fSMarcelo Tosatti if (area == MAP_FAILED) { 2866c902760fSMarcelo Tosatti perror("file_ram_alloc: can't mmap RAM pages"); 2867c902760fSMarcelo Tosatti close(fd); 2868c902760fSMarcelo Tosatti return (NULL); 2869c902760fSMarcelo Tosatti } 287004b16653SAlex Williamson block->fd = fd; 2871c902760fSMarcelo Tosatti return area; 2872c902760fSMarcelo Tosatti } 2873c902760fSMarcelo Tosatti #endif 2874c902760fSMarcelo Tosatti 2875d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size) 2876d17b5288SAlex Williamson { 287704b16653SAlex Williamson RAMBlock *block, *next_block; 28783e837b2cSAlex Williamson ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; 287904b16653SAlex Williamson 288004b16653SAlex Williamson if (QLIST_EMPTY(&ram_list.blocks)) 288104b16653SAlex Williamson return 0; 288204b16653SAlex Williamson 288304b16653SAlex Williamson QLIST_FOREACH(block, &ram_list.blocks, next) { 2884f15fbc4bSAnthony PERARD ram_addr_t end, next = RAM_ADDR_MAX; 288504b16653SAlex Williamson 288604b16653SAlex Williamson end = block->offset + block->length; 288704b16653SAlex Williamson 288804b16653SAlex Williamson QLIST_FOREACH(next_block, &ram_list.blocks, next) { 288904b16653SAlex Williamson if (next_block->offset >= end) { 289004b16653SAlex Williamson next = MIN(next, next_block->offset); 289104b16653SAlex Williamson } 289204b16653SAlex Williamson } 289304b16653SAlex Williamson if (next - end >= size && next - end < mingap) { 289404b16653SAlex Williamson offset = end; 289504b16653SAlex Williamson mingap = next - end; 289604b16653SAlex Williamson } 289704b16653SAlex Williamson } 28983e837b2cSAlex Williamson 28993e837b2cSAlex Williamson if (offset == RAM_ADDR_MAX) { 29003e837b2cSAlex Williamson fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", 29013e837b2cSAlex Williamson (uint64_t)size); 29023e837b2cSAlex Williamson abort(); 29033e837b2cSAlex Williamson } 29043e837b2cSAlex Williamson 290504b16653SAlex Williamson return offset; 290604b16653SAlex Williamson } 290704b16653SAlex Williamson 290804b16653SAlex Williamson static ram_addr_t last_ram_offset(void) 290904b16653SAlex Williamson { 2910d17b5288SAlex Williamson RAMBlock *block; 2911d17b5288SAlex Williamson ram_addr_t last = 0; 2912d17b5288SAlex Williamson 2913d17b5288SAlex Williamson QLIST_FOREACH(block, &ram_list.blocks, next) 2914d17b5288SAlex Williamson last = MAX(last, block->offset + block->length); 2915d17b5288SAlex Williamson 2916d17b5288SAlex Williamson return last; 2917d17b5288SAlex Williamson } 2918d17b5288SAlex Williamson 291984b89d78SCam Macdonell ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, 292084b89d78SCam Macdonell ram_addr_t size, void *host) 292184b89d78SCam Macdonell { 292284b89d78SCam Macdonell RAMBlock *new_block, *block; 292384b89d78SCam Macdonell 292484b89d78SCam Macdonell size = TARGET_PAGE_ALIGN(size); 29257267c094SAnthony Liguori new_block = g_malloc0(sizeof(*new_block)); 292684b89d78SCam Macdonell 292784b89d78SCam Macdonell if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { 292884b89d78SCam Macdonell char *id = dev->parent_bus->info->get_dev_path(dev); 292984b89d78SCam Macdonell if (id) { 293084b89d78SCam Macdonell snprintf(new_block->idstr, 
sizeof(new_block->idstr), "%s/", id);
29317267c094SAnthony Liguori g_free(id);
293284b89d78SCam Macdonell }
293384b89d78SCam Macdonell }
293484b89d78SCam Macdonell pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
293584b89d78SCam Macdonell
293684b89d78SCam Macdonell QLIST_FOREACH(block, &ram_list.blocks, next) {
293784b89d78SCam Macdonell if (!strcmp(block->idstr, new_block->idstr)) {
293884b89d78SCam Macdonell fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
293984b89d78SCam Macdonell new_block->idstr);
294084b89d78SCam Macdonell abort();
294184b89d78SCam Macdonell }
294284b89d78SCam Macdonell }
294384b89d78SCam Macdonell
2944432d268cSJun Nakajima new_block->offset = find_ram_offset(size);
29456977dfe6SYoshiaki Tamura if (host) {
294684b89d78SCam Macdonell new_block->host = host;
2947cd19cfa2SHuang Ying new_block->flags |= RAM_PREALLOC_MASK;
29486977dfe6SYoshiaki Tamura } else {
2949c902760fSMarcelo Tosatti if (mem_path) {
2950c902760fSMarcelo Tosatti #if defined (__linux__) && !defined(TARGET_S390X)
295104b16653SAlex Williamson new_block->host = file_ram_alloc(new_block, size, mem_path);
2952618a568dSMarcelo Tosatti if (!new_block->host) {
2953618a568dSMarcelo Tosatti new_block->host = qemu_vmalloc(size);
2954e78815a5SAndreas Färber qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2955618a568dSMarcelo Tosatti }
2956c902760fSMarcelo Tosatti #else
2957c902760fSMarcelo Tosatti fprintf(stderr, "-mem-path option unsupported\n");
2958c902760fSMarcelo Tosatti exit(1);
2959c902760fSMarcelo Tosatti #endif
2960c902760fSMarcelo Tosatti } else {
29616b02494dSAlexander Graf #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2962ff83678aSChristian Borntraeger /* S390 KVM requires the topmost vma of the RAM to be smaller than
2963ff83678aSChristian Borntraeger a system defined value, which is at least 256GB. Larger systems
2964ff83678aSChristian Borntraeger have larger values. We put the guest between the end of data
2965ff83678aSChristian Borntraeger segment (system break) and this value. We use 32GB as a base to
2966ff83678aSChristian Borntraeger have enough room for the system break to grow.
*/ 2967ff83678aSChristian Borntraeger new_block->host = mmap((void*)0x800000000, size, 2968c902760fSMarcelo Tosatti PROT_EXEC|PROT_READ|PROT_WRITE, 2969ff83678aSChristian Borntraeger MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); 2970fb8b2735SAlexander Graf if (new_block->host == MAP_FAILED) { 2971fb8b2735SAlexander Graf fprintf(stderr, "Allocating RAM failed\n"); 2972fb8b2735SAlexander Graf abort(); 2973fb8b2735SAlexander Graf } 29746b02494dSAlexander Graf #else 2975868bb33fSJan Kiszka if (xen_enabled()) { 2976432d268cSJun Nakajima xen_ram_alloc(new_block->offset, size); 2977432d268cSJun Nakajima } else { 297894a6b54fSpbrook new_block->host = qemu_vmalloc(size); 2979432d268cSJun Nakajima } 29806b02494dSAlexander Graf #endif 2981e78815a5SAndreas Färber qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE); 2982c902760fSMarcelo Tosatti } 29836977dfe6SYoshiaki Tamura } 298494a6b54fSpbrook new_block->length = size; 298594a6b54fSpbrook 2986f471a17eSAlex Williamson QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); 298794a6b54fSpbrook 29887267c094SAnthony Liguori ram_list.phys_dirty = g_realloc(ram_list.phys_dirty, 298904b16653SAlex Williamson last_ram_offset() >> TARGET_PAGE_BITS); 2990d17b5288SAlex Williamson memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), 299194a6b54fSpbrook 0xff, size >> TARGET_PAGE_BITS); 299294a6b54fSpbrook 29936f0437e8SJan Kiszka if (kvm_enabled()) 29946f0437e8SJan Kiszka kvm_setup_guest_memory(new_block->host, size); 29956f0437e8SJan Kiszka 299694a6b54fSpbrook return new_block->offset; 299794a6b54fSpbrook } 2998e9a1ab19Sbellard 29996977dfe6SYoshiaki Tamura ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size) 30006977dfe6SYoshiaki Tamura { 30016977dfe6SYoshiaki Tamura return qemu_ram_alloc_from_ptr(dev, name, size, NULL); 30026977dfe6SYoshiaki Tamura } 30036977dfe6SYoshiaki Tamura 30041f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr) 30051f2e98b6SAlex Williamson { 30061f2e98b6SAlex Williamson RAMBlock *block; 30071f2e98b6SAlex Williamson 30081f2e98b6SAlex Williamson QLIST_FOREACH(block, &ram_list.blocks, next) { 30091f2e98b6SAlex Williamson if (addr == block->offset) { 30101f2e98b6SAlex Williamson QLIST_REMOVE(block, next); 30117267c094SAnthony Liguori g_free(block); 30121f2e98b6SAlex Williamson return; 30131f2e98b6SAlex Williamson } 30141f2e98b6SAlex Williamson } 30151f2e98b6SAlex Williamson } 30161f2e98b6SAlex Williamson 3017c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr) 3018e9a1ab19Sbellard { 301904b16653SAlex Williamson RAMBlock *block; 302004b16653SAlex Williamson 302104b16653SAlex Williamson QLIST_FOREACH(block, &ram_list.blocks, next) { 302204b16653SAlex Williamson if (addr == block->offset) { 302304b16653SAlex Williamson QLIST_REMOVE(block, next); 3024cd19cfa2SHuang Ying if (block->flags & RAM_PREALLOC_MASK) { 3025cd19cfa2SHuang Ying ; 3026cd19cfa2SHuang Ying } else if (mem_path) { 302704b16653SAlex Williamson #if defined (__linux__) && !defined(TARGET_S390X) 302804b16653SAlex Williamson if (block->fd) { 302904b16653SAlex Williamson munmap(block->host, block->length); 303004b16653SAlex Williamson close(block->fd); 303104b16653SAlex Williamson } else { 303204b16653SAlex Williamson qemu_vfree(block->host); 303304b16653SAlex Williamson } 3034fd28aa13SJan Kiszka #else 3035fd28aa13SJan Kiszka abort(); 303604b16653SAlex Williamson #endif 303704b16653SAlex Williamson } else { 303804b16653SAlex Williamson #if defined(TARGET_S390X) && defined(CONFIG_KVM) 303904b16653SAlex Williamson 
munmap(block->host, block->length); 304004b16653SAlex Williamson #else 3041868bb33fSJan Kiszka if (xen_enabled()) { 3042e41d7c69SJan Kiszka xen_invalidate_map_cache_entry(block->host); 3043432d268cSJun Nakajima } else { 304404b16653SAlex Williamson qemu_vfree(block->host); 3045432d268cSJun Nakajima } 304604b16653SAlex Williamson #endif 304704b16653SAlex Williamson } 30487267c094SAnthony Liguori g_free(block); 304904b16653SAlex Williamson return; 305004b16653SAlex Williamson } 305104b16653SAlex Williamson } 305204b16653SAlex Williamson 3053e9a1ab19Sbellard } 3054e9a1ab19Sbellard 3055cd19cfa2SHuang Ying #ifndef _WIN32 3056cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) 3057cd19cfa2SHuang Ying { 3058cd19cfa2SHuang Ying RAMBlock *block; 3059cd19cfa2SHuang Ying ram_addr_t offset; 3060cd19cfa2SHuang Ying int flags; 3061cd19cfa2SHuang Ying void *area, *vaddr; 3062cd19cfa2SHuang Ying 3063cd19cfa2SHuang Ying QLIST_FOREACH(block, &ram_list.blocks, next) { 3064cd19cfa2SHuang Ying offset = addr - block->offset; 3065cd19cfa2SHuang Ying if (offset < block->length) { 3066cd19cfa2SHuang Ying vaddr = block->host + offset; 3067cd19cfa2SHuang Ying if (block->flags & RAM_PREALLOC_MASK) { 3068cd19cfa2SHuang Ying ; 3069cd19cfa2SHuang Ying } else { 3070cd19cfa2SHuang Ying flags = MAP_FIXED; 3071cd19cfa2SHuang Ying munmap(vaddr, length); 3072cd19cfa2SHuang Ying if (mem_path) { 3073cd19cfa2SHuang Ying #if defined(__linux__) && !defined(TARGET_S390X) 3074cd19cfa2SHuang Ying if (block->fd) { 3075cd19cfa2SHuang Ying #ifdef MAP_POPULATE 3076cd19cfa2SHuang Ying flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED : 3077cd19cfa2SHuang Ying MAP_PRIVATE; 3078cd19cfa2SHuang Ying #else 3079cd19cfa2SHuang Ying flags |= MAP_PRIVATE; 3080cd19cfa2SHuang Ying #endif 3081cd19cfa2SHuang Ying area = mmap(vaddr, length, PROT_READ | PROT_WRITE, 3082cd19cfa2SHuang Ying flags, block->fd, offset); 3083cd19cfa2SHuang Ying } else { 3084cd19cfa2SHuang Ying flags |= MAP_PRIVATE | MAP_ANONYMOUS; 3085cd19cfa2SHuang Ying area = mmap(vaddr, length, PROT_READ | PROT_WRITE, 3086cd19cfa2SHuang Ying flags, -1, 0); 3087cd19cfa2SHuang Ying } 3088fd28aa13SJan Kiszka #else 3089fd28aa13SJan Kiszka abort(); 3090cd19cfa2SHuang Ying #endif 3091cd19cfa2SHuang Ying } else { 3092cd19cfa2SHuang Ying #if defined(TARGET_S390X) && defined(CONFIG_KVM) 3093cd19cfa2SHuang Ying flags |= MAP_SHARED | MAP_ANONYMOUS; 3094cd19cfa2SHuang Ying area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE, 3095cd19cfa2SHuang Ying flags, -1, 0); 3096cd19cfa2SHuang Ying #else 3097cd19cfa2SHuang Ying flags |= MAP_PRIVATE | MAP_ANONYMOUS; 3098cd19cfa2SHuang Ying area = mmap(vaddr, length, PROT_READ | PROT_WRITE, 3099cd19cfa2SHuang Ying flags, -1, 0); 3100cd19cfa2SHuang Ying #endif 3101cd19cfa2SHuang Ying } 3102cd19cfa2SHuang Ying if (area != vaddr) { 3103f15fbc4bSAnthony PERARD fprintf(stderr, "Could not remap addr: " 3104f15fbc4bSAnthony PERARD RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n", 3105cd19cfa2SHuang Ying length, addr); 3106cd19cfa2SHuang Ying exit(1); 3107cd19cfa2SHuang Ying } 3108cd19cfa2SHuang Ying qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE); 3109cd19cfa2SHuang Ying } 3110cd19cfa2SHuang Ying return; 3111cd19cfa2SHuang Ying } 3112cd19cfa2SHuang Ying } 3113cd19cfa2SHuang Ying } 3114cd19cfa2SHuang Ying #endif /* !_WIN32 */ 3115cd19cfa2SHuang Ying 3116dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc. 
31175579c7f3Spbrook With the exception of the softmmu code in this file, this should
31185579c7f3Spbrook only be used for local memory (e.g. video ram) that the device owns,
31195579c7f3Spbrook and knows it isn't going to access beyond the end of the block.
31205579c7f3Spbrook
31215579c7f3Spbrook It should not be used for general purpose DMA.
31225579c7f3Spbrook Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
31235579c7f3Spbrook */
3124c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
3125dc828ca1Spbrook {
312694a6b54fSpbrook RAMBlock *block;
312794a6b54fSpbrook
3128f471a17eSAlex Williamson QLIST_FOREACH(block, &ram_list.blocks, next) {
3129f471a17eSAlex Williamson if (addr - block->offset < block->length) {
31307d82af38SVincent Palatin /* Move this entry to the start of the list. */
31317d82af38SVincent Palatin if (block != QLIST_FIRST(&ram_list.blocks)) {
3132f471a17eSAlex Williamson QLIST_REMOVE(block, next);
3133f471a17eSAlex Williamson QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
31347d82af38SVincent Palatin }
3135868bb33fSJan Kiszka if (xen_enabled()) {
3136432d268cSJun Nakajima /* We need to check if the requested address is in the RAM
3137432d268cSJun Nakajima * because we don't want to map the entire memory in QEMU.
3138712c2b41SStefano Stabellini * In that case just map until the end of the page.
3139432d268cSJun Nakajima */
3140432d268cSJun Nakajima if (block->offset == 0) {
3141e41d7c69SJan Kiszka return xen_map_cache(addr, 0, 0);
3142432d268cSJun Nakajima } else if (block->host == NULL) {
3143e41d7c69SJan Kiszka block->host =
3144e41d7c69SJan Kiszka xen_map_cache(block->offset, block->length, 1);
3145432d268cSJun Nakajima }
3146432d268cSJun Nakajima }
3147f471a17eSAlex Williamson return block->host + (addr - block->offset);
314894a6b54fSpbrook }
3149f471a17eSAlex Williamson }
3150f471a17eSAlex Williamson
315194a6b54fSpbrook fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
315294a6b54fSpbrook abort();
3153f471a17eSAlex Williamson
3154f471a17eSAlex Williamson return NULL;
3155dc828ca1Spbrook }
3156dc828ca1Spbrook
3157b2e0a138SMichael S. Tsirkin /* Return a host pointer to ram allocated with qemu_ram_alloc.
3158b2e0a138SMichael S. Tsirkin * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3159b2e0a138SMichael S. Tsirkin */
3160b2e0a138SMichael S. Tsirkin void *qemu_safe_ram_ptr(ram_addr_t addr)
3161b2e0a138SMichael S. Tsirkin {
3162b2e0a138SMichael S. Tsirkin RAMBlock *block;
3163b2e0a138SMichael S. Tsirkin
3164b2e0a138SMichael S. Tsirkin QLIST_FOREACH(block, &ram_list.blocks, next) {
3165b2e0a138SMichael S. Tsirkin if (addr - block->offset < block->length) {
3166868bb33fSJan Kiszka if (xen_enabled()) {
3167432d268cSJun Nakajima /* We need to check if the requested address is in the RAM
3168432d268cSJun Nakajima * because we don't want to map the entire memory in QEMU.
3169712c2b41SStefano Stabellini * In that case just map until the end of the page.
3170432d268cSJun Nakajima */
3171432d268cSJun Nakajima if (block->offset == 0) {
3172e41d7c69SJan Kiszka return xen_map_cache(addr, 0, 0);
3173432d268cSJun Nakajima } else if (block->host == NULL) {
3174e41d7c69SJan Kiszka block->host =
3175e41d7c69SJan Kiszka xen_map_cache(block->offset, block->length, 1);
3176432d268cSJun Nakajima }
3177432d268cSJun Nakajima }
3178b2e0a138SMichael S. Tsirkin return block->host + (addr - block->offset);
3179b2e0a138SMichael S. Tsirkin }
3180b2e0a138SMichael S. Tsirkin }
3181b2e0a138SMichael S. Tsirkin
3182b2e0a138SMichael S. Tsirkin fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3183b2e0a138SMichael S. Tsirkin abort();
3184b2e0a138SMichael S. Tsirkin
3185b2e0a138SMichael S. Tsirkin return NULL;
3186b2e0a138SMichael S. Tsirkin }
3187b2e0a138SMichael S. Tsirkin
318838bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
318938bee5dcSStefano Stabellini * but takes a size argument */
31908ab934f9SStefano Stabellini void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
319138bee5dcSStefano Stabellini {
31928ab934f9SStefano Stabellini if (*size == 0) {
31938ab934f9SStefano Stabellini return NULL;
31948ab934f9SStefano Stabellini }
3195868bb33fSJan Kiszka if (xen_enabled()) {
3196e41d7c69SJan Kiszka return xen_map_cache(addr, *size, 1);
3197868bb33fSJan Kiszka } else {
319838bee5dcSStefano Stabellini RAMBlock *block;
319938bee5dcSStefano Stabellini
320038bee5dcSStefano Stabellini QLIST_FOREACH(block, &ram_list.blocks, next) {
320138bee5dcSStefano Stabellini if (addr - block->offset < block->length) {
320238bee5dcSStefano Stabellini if (addr - block->offset + *size > block->length)
320338bee5dcSStefano Stabellini *size = block->length - addr + block->offset;
320438bee5dcSStefano Stabellini return block->host + (addr - block->offset);
320538bee5dcSStefano Stabellini }
320638bee5dcSStefano Stabellini }
320738bee5dcSStefano Stabellini
320838bee5dcSStefano Stabellini fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
320938bee5dcSStefano Stabellini abort();
321038bee5dcSStefano Stabellini }
321138bee5dcSStefano Stabellini }
321238bee5dcSStefano Stabellini
3213050a0ddfSAnthony PERARD void qemu_put_ram_ptr(void *addr)
3214050a0ddfSAnthony PERARD {
3215050a0ddfSAnthony PERARD trace_qemu_put_ram_ptr(addr);
3216050a0ddfSAnthony PERARD }
3217050a0ddfSAnthony PERARD
3218e890261fSMarcelo Tosatti int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
32195579c7f3Spbrook {
322094a6b54fSpbrook RAMBlock *block;
322194a6b54fSpbrook uint8_t *host = ptr;
322294a6b54fSpbrook
3223868bb33fSJan Kiszka if (xen_enabled()) {
3224e41d7c69SJan Kiszka *ram_addr = xen_ram_addr_from_mapcache(ptr);
3225712c2b41SStefano Stabellini return 0;
3226712c2b41SStefano Stabellini }
3227712c2b41SStefano Stabellini
3228f471a17eSAlex Williamson QLIST_FOREACH(block, &ram_list.blocks, next) {
3229432d268cSJun Nakajima /* This case happens when the block is not mapped. */
3230432d268cSJun Nakajima if (block->host == NULL) {
3231432d268cSJun Nakajima continue;
3232432d268cSJun Nakajima }
3233f471a17eSAlex Williamson if (host - block->host < block->length) {
3234e890261fSMarcelo Tosatti *ram_addr = block->offset + (host - block->host);
3235e890261fSMarcelo Tosatti return 0;
323694a6b54fSpbrook }
3237f471a17eSAlex Williamson }
3238432d268cSJun Nakajima
3239e890261fSMarcelo Tosatti return -1;
3240e890261fSMarcelo Tosatti }
3241f471a17eSAlex Williamson
3242e890261fSMarcelo Tosatti /* Some of the softmmu routines need to translate from a host pointer
3243e890261fSMarcelo Tosatti (typically a TLB entry) back to a ram offset.
*/ 3244e890261fSMarcelo Tosatti ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 3245e890261fSMarcelo Tosatti { 3246e890261fSMarcelo Tosatti ram_addr_t ram_addr; 3247e890261fSMarcelo Tosatti 3248e890261fSMarcelo Tosatti if (qemu_ram_addr_from_host(ptr, &ram_addr)) { 324994a6b54fSpbrook fprintf(stderr, "Bad ram pointer %p\n", ptr); 325094a6b54fSpbrook abort(); 3251e890261fSMarcelo Tosatti } 3252e890261fSMarcelo Tosatti return ram_addr; 32535579c7f3Spbrook } 32545579c7f3Spbrook 3255c227f099SAnthony Liguori static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) 325633417e70Sbellard { 325767d3b957Spbrook #ifdef DEBUG_UNASSIGNED 3258ab3d1727Sblueswir1 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 325967d3b957Spbrook #endif 32605b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 3261b14ef7c9SBlue Swirl cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1); 3262e18231a3Sblueswir1 #endif 3263e18231a3Sblueswir1 return 0; 3264e18231a3Sblueswir1 } 3265e18231a3Sblueswir1 3266c227f099SAnthony Liguori static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr) 3267e18231a3Sblueswir1 { 3268e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED 3269e18231a3Sblueswir1 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 3270e18231a3Sblueswir1 #endif 32715b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 3272b14ef7c9SBlue Swirl cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2); 3273e18231a3Sblueswir1 #endif 3274e18231a3Sblueswir1 return 0; 3275e18231a3Sblueswir1 } 3276e18231a3Sblueswir1 3277c227f099SAnthony Liguori static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr) 3278e18231a3Sblueswir1 { 3279e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED 3280e18231a3Sblueswir1 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 3281e18231a3Sblueswir1 #endif 32825b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 3283b14ef7c9SBlue Swirl cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4); 3284b4f0a316Sblueswir1 #endif 328533417e70Sbellard return 0; 328633417e70Sbellard } 328733417e70Sbellard 3288c227f099SAnthony Liguori static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) 328933417e70Sbellard { 329067d3b957Spbrook #ifdef DEBUG_UNASSIGNED 3291ab3d1727Sblueswir1 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 329267d3b957Spbrook #endif 32935b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 3294b14ef7c9SBlue Swirl cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1); 3295e18231a3Sblueswir1 #endif 3296e18231a3Sblueswir1 } 3297e18231a3Sblueswir1 3298c227f099SAnthony Liguori static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) 3299e18231a3Sblueswir1 { 3300e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED 3301e18231a3Sblueswir1 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 3302e18231a3Sblueswir1 #endif 33035b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 3304b14ef7c9SBlue Swirl cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2); 3305e18231a3Sblueswir1 #endif 3306e18231a3Sblueswir1 } 3307e18231a3Sblueswir1 3308c227f099SAnthony Liguori static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) 3309e18231a3Sblueswir1 { 
3310e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED 3311e18231a3Sblueswir1 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); 3312e18231a3Sblueswir1 #endif 33135b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) 3314b14ef7c9SBlue Swirl cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4); 3315b4f0a316Sblueswir1 #endif 331633417e70Sbellard } 331733417e70Sbellard 3318d60efc6bSBlue Swirl static CPUReadMemoryFunc * const unassigned_mem_read[3] = { 331933417e70Sbellard unassigned_mem_readb, 3320e18231a3Sblueswir1 unassigned_mem_readw, 3321e18231a3Sblueswir1 unassigned_mem_readl, 332233417e70Sbellard }; 332333417e70Sbellard 3324d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const unassigned_mem_write[3] = { 332533417e70Sbellard unassigned_mem_writeb, 3326e18231a3Sblueswir1 unassigned_mem_writew, 3327e18231a3Sblueswir1 unassigned_mem_writel, 332833417e70Sbellard }; 332933417e70Sbellard 3330c227f099SAnthony Liguori static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr, 33310f459d16Spbrook uint32_t val) 33321ccde1cbSbellard { 33333a7d929eSbellard int dirty_flags; 3334f7c11b53SYoshiaki Tamura dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 33353a7d929eSbellard if (!(dirty_flags & CODE_DIRTY_FLAG)) { 33363a7d929eSbellard #if !defined(CONFIG_USER_ONLY) 33373a7d929eSbellard tb_invalidate_phys_page_fast(ram_addr, 1); 3338f7c11b53SYoshiaki Tamura dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 33393a7d929eSbellard #endif 33403a7d929eSbellard } 33415579c7f3Spbrook stb_p(qemu_get_ram_ptr(ram_addr), val); 3342f23db169Sbellard dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 3343f7c11b53SYoshiaki Tamura cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); 3344f23db169Sbellard /* we remove the notdirty callback only if the code has been 3345f23db169Sbellard flushed */ 3346f23db169Sbellard if (dirty_flags == 0xff) 33472e70f6efSpbrook tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); 33481ccde1cbSbellard } 33491ccde1cbSbellard 3350c227f099SAnthony Liguori static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr, 33510f459d16Spbrook uint32_t val) 33521ccde1cbSbellard { 33533a7d929eSbellard int dirty_flags; 3354f7c11b53SYoshiaki Tamura dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 33553a7d929eSbellard if (!(dirty_flags & CODE_DIRTY_FLAG)) { 33563a7d929eSbellard #if !defined(CONFIG_USER_ONLY) 33573a7d929eSbellard tb_invalidate_phys_page_fast(ram_addr, 2); 3358f7c11b53SYoshiaki Tamura dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 33593a7d929eSbellard #endif 33603a7d929eSbellard } 33615579c7f3Spbrook stw_p(qemu_get_ram_ptr(ram_addr), val); 3362f23db169Sbellard dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 3363f7c11b53SYoshiaki Tamura cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); 3364f23db169Sbellard /* we remove the notdirty callback only if the code has been 3365f23db169Sbellard flushed */ 3366f23db169Sbellard if (dirty_flags == 0xff) 33672e70f6efSpbrook tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); 33681ccde1cbSbellard } 33691ccde1cbSbellard 3370c227f099SAnthony Liguori static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr, 33710f459d16Spbrook uint32_t val) 33721ccde1cbSbellard { 33733a7d929eSbellard int dirty_flags; 3374f7c11b53SYoshiaki Tamura dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 33753a7d929eSbellard if (!(dirty_flags & CODE_DIRTY_FLAG)) { 33763a7d929eSbellard #if 
!defined(CONFIG_USER_ONLY)
33773a7d929eSbellard tb_invalidate_phys_page_fast(ram_addr, 4);
3378f7c11b53SYoshiaki Tamura dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
33793a7d929eSbellard #endif
33803a7d929eSbellard }
33815579c7f3Spbrook stl_p(qemu_get_ram_ptr(ram_addr), val);
3382f23db169Sbellard dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3383f7c11b53SYoshiaki Tamura cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3384f23db169Sbellard /* we remove the notdirty callback only if the code has been
3385f23db169Sbellard flushed */
3386f23db169Sbellard if (dirty_flags == 0xff)
33872e70f6efSpbrook tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
33881ccde1cbSbellard }
33891ccde1cbSbellard
3390d60efc6bSBlue Swirl static CPUReadMemoryFunc * const error_mem_read[3] = {
33913a7d929eSbellard NULL, /* never used */
33923a7d929eSbellard NULL, /* never used */
33933a7d929eSbellard NULL, /* never used */
33943a7d929eSbellard };
33953a7d929eSbellard
3396d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
33971ccde1cbSbellard notdirty_mem_writeb,
33981ccde1cbSbellard notdirty_mem_writew,
33991ccde1cbSbellard notdirty_mem_writel,
34001ccde1cbSbellard };
34011ccde1cbSbellard
34020f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit. */
3403b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
34040f459d16Spbrook {
34050f459d16Spbrook CPUState *env = cpu_single_env;
340606d55cc1Saliguori target_ulong pc, cs_base;
340706d55cc1Saliguori TranslationBlock *tb;
34080f459d16Spbrook target_ulong vaddr;
3409a1d1bb31Saliguori CPUWatchpoint *wp;
341006d55cc1Saliguori int cpu_flags;
34110f459d16Spbrook
341206d55cc1Saliguori if (env->watchpoint_hit) {
341306d55cc1Saliguori /* We re-entered the check after replacing the TB. Now raise
341406d55cc1Saliguori * the debug interrupt so that it will trigger after the
341506d55cc1Saliguori * current instruction. */
341606d55cc1Saliguori cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
341706d55cc1Saliguori return;
341806d55cc1Saliguori }
34192e70f6efSpbrook vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
342072cf2d4fSBlue Swirl QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3421b4051334Saliguori if ((vaddr == (wp->vaddr & len_mask) ||
3422b4051334Saliguori (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
34236e140f28Saliguori wp->flags |= BP_WATCHPOINT_HIT;
34246e140f28Saliguori if (!env->watchpoint_hit) {
3425a1d1bb31Saliguori env->watchpoint_hit = wp;
342606d55cc1Saliguori tb = tb_find_pc(env->mem_io_pc);
342706d55cc1Saliguori if (!tb) {
34286e140f28Saliguori cpu_abort(env, "check_watchpoint: could not find TB for "
34296e140f28Saliguori "pc=%p", (void *)env->mem_io_pc);
343006d55cc1Saliguori }
3431618ba8e6SStefan Weil cpu_restore_state(tb, env, env->mem_io_pc);
343206d55cc1Saliguori tb_phys_invalidate(tb, -1);
343306d55cc1Saliguori if (wp->flags & BP_STOP_BEFORE_ACCESS) {
343406d55cc1Saliguori env->exception_index = EXCP_DEBUG;
343506d55cc1Saliguori } else {
343606d55cc1Saliguori cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
343706d55cc1Saliguori tb_gen_code(env, pc, cs_base, cpu_flags, 1);
343806d55cc1Saliguori }
343906d55cc1Saliguori cpu_resume_from_signal(env, NULL);
34400f459d16Spbrook }
34416e140f28Saliguori } else {
34426e140f28Saliguori wp->flags &= ~BP_WATCHPOINT_HIT;
34436e140f28Saliguori }
34440f459d16Spbrook }
34450f459d16Spbrook }
34460f459d16Spbrook
34476658ffb8Spbrook /* Watchpoint access routines.
Watchpoints are inserted using TLB tricks, 34486658ffb8Spbrook so these check for a hit then pass through to the normal out-of-line 34496658ffb8Spbrook phys routines. */ 3450c227f099SAnthony Liguori static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr) 34516658ffb8Spbrook { 3452b4051334Saliguori check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ); 34536658ffb8Spbrook return ldub_phys(addr); 34546658ffb8Spbrook } 34556658ffb8Spbrook 3456c227f099SAnthony Liguori static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr) 34576658ffb8Spbrook { 3458b4051334Saliguori check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ); 34596658ffb8Spbrook return lduw_phys(addr); 34606658ffb8Spbrook } 34616658ffb8Spbrook 3462c227f099SAnthony Liguori static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr) 34636658ffb8Spbrook { 3464b4051334Saliguori check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ); 34656658ffb8Spbrook return ldl_phys(addr); 34666658ffb8Spbrook } 34676658ffb8Spbrook 3468c227f099SAnthony Liguori static void watch_mem_writeb(void *opaque, target_phys_addr_t addr, 34696658ffb8Spbrook uint32_t val) 34706658ffb8Spbrook { 3471b4051334Saliguori check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE); 34726658ffb8Spbrook stb_phys(addr, val); 34736658ffb8Spbrook } 34746658ffb8Spbrook 3475c227f099SAnthony Liguori static void watch_mem_writew(void *opaque, target_phys_addr_t addr, 34766658ffb8Spbrook uint32_t val) 34776658ffb8Spbrook { 3478b4051334Saliguori check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE); 34796658ffb8Spbrook stw_phys(addr, val); 34806658ffb8Spbrook } 34816658ffb8Spbrook 3482c227f099SAnthony Liguori static void watch_mem_writel(void *opaque, target_phys_addr_t addr, 34836658ffb8Spbrook uint32_t val) 34846658ffb8Spbrook { 3485b4051334Saliguori check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE); 34866658ffb8Spbrook stl_phys(addr, val); 34876658ffb8Spbrook } 34886658ffb8Spbrook 3489d60efc6bSBlue Swirl static CPUReadMemoryFunc * const watch_mem_read[3] = { 34906658ffb8Spbrook watch_mem_readb, 34916658ffb8Spbrook watch_mem_readw, 34926658ffb8Spbrook watch_mem_readl, 34936658ffb8Spbrook }; 34946658ffb8Spbrook 3495d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const watch_mem_write[3] = { 34966658ffb8Spbrook watch_mem_writeb, 34976658ffb8Spbrook watch_mem_writew, 34986658ffb8Spbrook watch_mem_writel, 34996658ffb8Spbrook }; 35006658ffb8Spbrook 3501f6405247SRichard Henderson static inline uint32_t subpage_readlen (subpage_t *mmio, 3502f6405247SRichard Henderson target_phys_addr_t addr, 3503db7b5426Sblueswir1 unsigned int len) 3504db7b5426Sblueswir1 { 3505f6405247SRichard Henderson unsigned int idx = SUBPAGE_IDX(addr); 3506db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE) 3507db7b5426Sblueswir1 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__, 3508db7b5426Sblueswir1 mmio, len, addr, idx); 3509db7b5426Sblueswir1 #endif 3510db7b5426Sblueswir1 3511f6405247SRichard Henderson addr += mmio->region_offset[idx]; 3512f6405247SRichard Henderson idx = mmio->sub_io_index[idx]; 3513f6405247SRichard Henderson return io_mem_read[idx][len](io_mem_opaque[idx], addr); 3514db7b5426Sblueswir1 } 3515db7b5426Sblueswir1 3516c227f099SAnthony Liguori static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr, 3517db7b5426Sblueswir1 uint32_t value, unsigned int len) 3518db7b5426Sblueswir1 { 3519f6405247SRichard Henderson unsigned int idx = SUBPAGE_IDX(addr); 3520db7b5426Sblueswir1 
#if defined(DEBUG_SUBPAGE) 3521f6405247SRichard Henderson printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", 3522f6405247SRichard Henderson __func__, mmio, len, addr, idx, value); 3523db7b5426Sblueswir1 #endif 3524f6405247SRichard Henderson 3525f6405247SRichard Henderson addr += mmio->region_offset[idx]; 3526f6405247SRichard Henderson idx = mmio->sub_io_index[idx]; 3527f6405247SRichard Henderson io_mem_write[idx][len](io_mem_opaque[idx], addr, value); 3528db7b5426Sblueswir1 } 3529db7b5426Sblueswir1 3530c227f099SAnthony Liguori static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr) 3531db7b5426Sblueswir1 { 3532db7b5426Sblueswir1 return subpage_readlen(opaque, addr, 0); 3533db7b5426Sblueswir1 } 3534db7b5426Sblueswir1 3535c227f099SAnthony Liguori static void subpage_writeb (void *opaque, target_phys_addr_t addr, 3536db7b5426Sblueswir1 uint32_t value) 3537db7b5426Sblueswir1 { 3538db7b5426Sblueswir1 subpage_writelen(opaque, addr, value, 0); 3539db7b5426Sblueswir1 } 3540db7b5426Sblueswir1 3541c227f099SAnthony Liguori static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr) 3542db7b5426Sblueswir1 { 3543db7b5426Sblueswir1 return subpage_readlen(opaque, addr, 1); 3544db7b5426Sblueswir1 } 3545db7b5426Sblueswir1 3546c227f099SAnthony Liguori static void subpage_writew (void *opaque, target_phys_addr_t addr, 3547db7b5426Sblueswir1 uint32_t value) 3548db7b5426Sblueswir1 { 3549db7b5426Sblueswir1 subpage_writelen(opaque, addr, value, 1); 3550db7b5426Sblueswir1 } 3551db7b5426Sblueswir1 3552c227f099SAnthony Liguori static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr) 3553db7b5426Sblueswir1 { 3554db7b5426Sblueswir1 return subpage_readlen(opaque, addr, 2); 3555db7b5426Sblueswir1 } 3556db7b5426Sblueswir1 3557f6405247SRichard Henderson static void subpage_writel (void *opaque, target_phys_addr_t addr, 3558f6405247SRichard Henderson uint32_t value) 3559db7b5426Sblueswir1 { 3560db7b5426Sblueswir1 subpage_writelen(opaque, addr, value, 2); 3561db7b5426Sblueswir1 } 3562db7b5426Sblueswir1 3563d60efc6bSBlue Swirl static CPUReadMemoryFunc * const subpage_read[] = { 3564db7b5426Sblueswir1 &subpage_readb, 3565db7b5426Sblueswir1 &subpage_readw, 3566db7b5426Sblueswir1 &subpage_readl, 3567db7b5426Sblueswir1 }; 3568db7b5426Sblueswir1 3569d60efc6bSBlue Swirl static CPUWriteMemoryFunc * const subpage_write[] = { 3570db7b5426Sblueswir1 &subpage_writeb, 3571db7b5426Sblueswir1 &subpage_writew, 3572db7b5426Sblueswir1 &subpage_writel, 3573db7b5426Sblueswir1 }; 3574db7b5426Sblueswir1 3575c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 3576c227f099SAnthony Liguori ram_addr_t memory, ram_addr_t region_offset) 3577db7b5426Sblueswir1 { 3578db7b5426Sblueswir1 int idx, eidx; 3579db7b5426Sblueswir1 3580db7b5426Sblueswir1 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) 3581db7b5426Sblueswir1 return -1; 3582db7b5426Sblueswir1 idx = SUBPAGE_IDX(start); 3583db7b5426Sblueswir1 eidx = SUBPAGE_IDX(end); 3584db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE) 35850bf9e31aSBlue Swirl printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__, 3586db7b5426Sblueswir1 mmio, start, end, idx, eidx, memory); 3587db7b5426Sblueswir1 #endif 358895c318f5SGleb Natapov if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) 358995c318f5SGleb Natapov memory = IO_MEM_UNASSIGNED; 3590f6405247SRichard Henderson memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3591db7b5426Sblueswir1 for (; idx <= eidx; idx++) { 
3592f6405247SRichard Henderson mmio->sub_io_index[idx] = memory;
3593f6405247SRichard Henderson mmio->region_offset[idx] = region_offset;
3594db7b5426Sblueswir1 }
3595db7b5426Sblueswir1
3596db7b5426Sblueswir1 return 0;
3597db7b5426Sblueswir1 }
3598db7b5426Sblueswir1
3599f6405247SRichard Henderson static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3600f6405247SRichard Henderson ram_addr_t orig_memory,
3601f6405247SRichard Henderson ram_addr_t region_offset)
3602db7b5426Sblueswir1 {
3603c227f099SAnthony Liguori subpage_t *mmio;
3604db7b5426Sblueswir1 int subpage_memory;
3605db7b5426Sblueswir1
36067267c094SAnthony Liguori mmio = g_malloc0(sizeof(subpage_t));
36071eec614bSaliguori
3608db7b5426Sblueswir1 mmio->base = base;
36092507c12aSAlexander Graf subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
36102507c12aSAlexander Graf DEVICE_NATIVE_ENDIAN);
3611db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3612db7b5426Sblueswir1 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3613db7b5426Sblueswir1 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3614db7b5426Sblueswir1 #endif
3615db7b5426Sblueswir1 *phys = subpage_memory | IO_MEM_SUBPAGE;
3616f6405247SRichard Henderson subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3617db7b5426Sblueswir1
3618db7b5426Sblueswir1 return mmio;
3619db7b5426Sblueswir1 }
3620db7b5426Sblueswir1
362188715657Saliguori static int get_free_io_mem_idx(void)
362288715657Saliguori {
362388715657Saliguori int i;
362488715657Saliguori
362588715657Saliguori for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
362688715657Saliguori if (!io_mem_used[i]) {
362788715657Saliguori io_mem_used[i] = 1;
362888715657Saliguori return i;
362988715657Saliguori }
3630c6703b47SRiku Voipio fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
363188715657Saliguori return -1;
363288715657Saliguori }
363388715657Saliguori
3634dd310534SAlexander Graf /*
3635dd310534SAlexander Graf * Usually, devices operate in little endian mode. There are devices out
3636dd310534SAlexander Graf * there that operate in big endian too. Each device gets byte swapped
3637dd310534SAlexander Graf * mmio if plugged onto a CPU that does the other endianness.
3638dd310534SAlexander Graf *
3639dd310534SAlexander Graf * CPU Device swap?
3640dd310534SAlexander Graf * 3641dd310534SAlexander Graf * little little no 3642dd310534SAlexander Graf * little big yes 3643dd310534SAlexander Graf * big little yes 3644dd310534SAlexander Graf * big big no 3645dd310534SAlexander Graf */ 3646dd310534SAlexander Graf 3647dd310534SAlexander Graf typedef struct SwapEndianContainer { 3648dd310534SAlexander Graf CPUReadMemoryFunc *read[3]; 3649dd310534SAlexander Graf CPUWriteMemoryFunc *write[3]; 3650dd310534SAlexander Graf void *opaque; 3651dd310534SAlexander Graf } SwapEndianContainer; 3652dd310534SAlexander Graf 3653dd310534SAlexander Graf static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr) 3654dd310534SAlexander Graf { 3655dd310534SAlexander Graf uint32_t val; 3656dd310534SAlexander Graf SwapEndianContainer *c = opaque; 3657dd310534SAlexander Graf val = c->read[0](c->opaque, addr); 3658dd310534SAlexander Graf return val; 3659dd310534SAlexander Graf } 3660dd310534SAlexander Graf 3661dd310534SAlexander Graf static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr) 3662dd310534SAlexander Graf { 3663dd310534SAlexander Graf uint32_t val; 3664dd310534SAlexander Graf SwapEndianContainer *c = opaque; 3665dd310534SAlexander Graf val = bswap16(c->read[1](c->opaque, addr)); 3666dd310534SAlexander Graf return val; 3667dd310534SAlexander Graf } 3668dd310534SAlexander Graf 3669dd310534SAlexander Graf static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr) 3670dd310534SAlexander Graf { 3671dd310534SAlexander Graf uint32_t val; 3672dd310534SAlexander Graf SwapEndianContainer *c = opaque; 3673dd310534SAlexander Graf val = bswap32(c->read[2](c->opaque, addr)); 3674dd310534SAlexander Graf return val; 3675dd310534SAlexander Graf } 3676dd310534SAlexander Graf 3677dd310534SAlexander Graf static CPUReadMemoryFunc * const swapendian_readfn[3]={ 3678dd310534SAlexander Graf swapendian_mem_readb, 3679dd310534SAlexander Graf swapendian_mem_readw, 3680dd310534SAlexander Graf swapendian_mem_readl 3681dd310534SAlexander Graf }; 3682dd310534SAlexander Graf 3683dd310534SAlexander Graf static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr, 3684dd310534SAlexander Graf uint32_t val) 3685dd310534SAlexander Graf { 3686dd310534SAlexander Graf SwapEndianContainer *c = opaque; 3687dd310534SAlexander Graf c->write[0](c->opaque, addr, val); 3688dd310534SAlexander Graf } 3689dd310534SAlexander Graf 3690dd310534SAlexander Graf static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr, 3691dd310534SAlexander Graf uint32_t val) 3692dd310534SAlexander Graf { 3693dd310534SAlexander Graf SwapEndianContainer *c = opaque; 3694dd310534SAlexander Graf c->write[1](c->opaque, addr, bswap16(val)); 3695dd310534SAlexander Graf } 3696dd310534SAlexander Graf 3697dd310534SAlexander Graf static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr, 3698dd310534SAlexander Graf uint32_t val) 3699dd310534SAlexander Graf { 3700dd310534SAlexander Graf SwapEndianContainer *c = opaque; 3701dd310534SAlexander Graf c->write[2](c->opaque, addr, bswap32(val)); 3702dd310534SAlexander Graf } 3703dd310534SAlexander Graf 3704dd310534SAlexander Graf static CPUWriteMemoryFunc * const swapendian_writefn[3]={ 3705dd310534SAlexander Graf swapendian_mem_writeb, 3706dd310534SAlexander Graf swapendian_mem_writew, 3707dd310534SAlexander Graf swapendian_mem_writel 3708dd310534SAlexander Graf }; 3709dd310534SAlexander Graf 3710dd310534SAlexander Graf static void swapendian_init(int io_index) 3711dd310534SAlexander Graf { 
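    /* Descriptive note (added): wrap this io_index with byte-swapping
       thunks.  The original callbacks and opaque pointer are stashed in a
       SwapEndianContainer, and the io_mem_read/io_mem_write entries are
       redirected to the swapendian_* helpers, which bswap16()/bswap32()
       16- and 32-bit values on the way through; byte accesses pass
       straight through unchanged. */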
37127267c094SAnthony Liguori SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer)); 3713dd310534SAlexander Graf int i; 3714dd310534SAlexander Graf 3715dd310534SAlexander Graf /* Swap mmio for big endian targets */ 3716dd310534SAlexander Graf c->opaque = io_mem_opaque[io_index]; 3717dd310534SAlexander Graf for (i = 0; i < 3; i++) { 3718dd310534SAlexander Graf c->read[i] = io_mem_read[io_index][i]; 3719dd310534SAlexander Graf c->write[i] = io_mem_write[io_index][i]; 3720dd310534SAlexander Graf 3721dd310534SAlexander Graf io_mem_read[io_index][i] = swapendian_readfn[i]; 3722dd310534SAlexander Graf io_mem_write[io_index][i] = swapendian_writefn[i]; 3723dd310534SAlexander Graf } 3724dd310534SAlexander Graf io_mem_opaque[io_index] = c; 3725dd310534SAlexander Graf } 3726dd310534SAlexander Graf 3727dd310534SAlexander Graf static void swapendian_del(int io_index) 3728dd310534SAlexander Graf { 3729dd310534SAlexander Graf if (io_mem_read[io_index][0] == swapendian_readfn[0]) { 37307267c094SAnthony Liguori g_free(io_mem_opaque[io_index]); 3731dd310534SAlexander Graf } 3732dd310534SAlexander Graf } 3733dd310534SAlexander Graf 373433417e70Sbellard /* mem_read and mem_write are arrays of functions containing the 373533417e70Sbellard function to access byte (index 0), word (index 1) and dword (index 37360b4e6e3eSPaul Brook 2). Functions can be omitted with a NULL function pointer. 37373ee89922Sblueswir1 If io_index is non zero, the corresponding io zone is 37384254fab8Sblueswir1 modified. If it is zero, a new io zone is allocated. The return 37394254fab8Sblueswir1 value can be used with cpu_register_physical_memory(). (-1) is 37404254fab8Sblueswir1 returned if error. */ 37411eed09cbSAvi Kivity static int cpu_register_io_memory_fixed(int io_index, 3742d60efc6bSBlue Swirl CPUReadMemoryFunc * const *mem_read, 3743d60efc6bSBlue Swirl CPUWriteMemoryFunc * const *mem_write, 3744dd310534SAlexander Graf void *opaque, enum device_endian endian) 374533417e70Sbellard { 37463cab721dSRichard Henderson int i; 37473cab721dSRichard Henderson 374833417e70Sbellard if (io_index <= 0) { 374988715657Saliguori io_index = get_free_io_mem_idx(); 375088715657Saliguori if (io_index == -1) 375188715657Saliguori return io_index; 375233417e70Sbellard } else { 37531eed09cbSAvi Kivity io_index >>= IO_MEM_SHIFT; 375433417e70Sbellard if (io_index >= IO_MEM_NB_ENTRIES) 375533417e70Sbellard return -1; 375633417e70Sbellard } 375733417e70Sbellard 37583cab721dSRichard Henderson for (i = 0; i < 3; ++i) { 37593cab721dSRichard Henderson io_mem_read[io_index][i] 37603cab721dSRichard Henderson = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]); 37613cab721dSRichard Henderson } 37623cab721dSRichard Henderson for (i = 0; i < 3; ++i) { 37633cab721dSRichard Henderson io_mem_write[io_index][i] 37643cab721dSRichard Henderson = (mem_write[i] ? 
mem_write[i] : unassigned_mem_write[i]); 37653cab721dSRichard Henderson } 3766a4193c8aSbellard io_mem_opaque[io_index] = opaque; 3767f6405247SRichard Henderson 3768dd310534SAlexander Graf switch (endian) { 3769dd310534SAlexander Graf case DEVICE_BIG_ENDIAN: 3770dd310534SAlexander Graf #ifndef TARGET_WORDS_BIGENDIAN 3771dd310534SAlexander Graf swapendian_init(io_index); 3772dd310534SAlexander Graf #endif 3773dd310534SAlexander Graf break; 3774dd310534SAlexander Graf case DEVICE_LITTLE_ENDIAN: 3775dd310534SAlexander Graf #ifdef TARGET_WORDS_BIGENDIAN 3776dd310534SAlexander Graf swapendian_init(io_index); 3777dd310534SAlexander Graf #endif 3778dd310534SAlexander Graf break; 3779dd310534SAlexander Graf case DEVICE_NATIVE_ENDIAN: 3780dd310534SAlexander Graf default: 3781dd310534SAlexander Graf break; 3782dd310534SAlexander Graf } 3783dd310534SAlexander Graf 3784f6405247SRichard Henderson return (io_index << IO_MEM_SHIFT); 378533417e70Sbellard } 378661382a50Sbellard 3787d60efc6bSBlue Swirl int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read, 3788d60efc6bSBlue Swirl CPUWriteMemoryFunc * const *mem_write, 3789dd310534SAlexander Graf void *opaque, enum device_endian endian) 37901eed09cbSAvi Kivity { 37912507c12aSAlexander Graf return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian); 37921eed09cbSAvi Kivity } 37931eed09cbSAvi Kivity 379488715657Saliguori void cpu_unregister_io_memory(int io_table_address) 379588715657Saliguori { 379688715657Saliguori int i; 379788715657Saliguori int io_index = io_table_address >> IO_MEM_SHIFT; 379888715657Saliguori 3799dd310534SAlexander Graf swapendian_del(io_index); 3800dd310534SAlexander Graf 380188715657Saliguori for (i=0;i < 3; i++) { 380288715657Saliguori io_mem_read[io_index][i] = unassigned_mem_read[i]; 380388715657Saliguori io_mem_write[io_index][i] = unassigned_mem_write[i]; 380488715657Saliguori } 380588715657Saliguori io_mem_opaque[io_index] = NULL; 380688715657Saliguori io_mem_used[io_index] = 0; 380788715657Saliguori } 380888715657Saliguori 3809e9179ce1SAvi Kivity static void io_mem_init(void) 3810e9179ce1SAvi Kivity { 3811e9179ce1SAvi Kivity int i; 3812e9179ce1SAvi Kivity 38132507c12aSAlexander Graf cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, 38142507c12aSAlexander Graf unassigned_mem_write, NULL, 38152507c12aSAlexander Graf DEVICE_NATIVE_ENDIAN); 38162507c12aSAlexander Graf cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, 38172507c12aSAlexander Graf unassigned_mem_write, NULL, 38182507c12aSAlexander Graf DEVICE_NATIVE_ENDIAN); 38192507c12aSAlexander Graf cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, 38202507c12aSAlexander Graf notdirty_mem_write, NULL, 38212507c12aSAlexander Graf DEVICE_NATIVE_ENDIAN); 3822e9179ce1SAvi Kivity for (i=0; i<5; i++) 3823e9179ce1SAvi Kivity io_mem_used[i] = 1; 3824e9179ce1SAvi Kivity 3825e9179ce1SAvi Kivity io_mem_watch = cpu_register_io_memory(watch_mem_read, 38262507c12aSAlexander Graf watch_mem_write, NULL, 38272507c12aSAlexander Graf DEVICE_NATIVE_ENDIAN); 3828e9179ce1SAvi Kivity } 3829e9179ce1SAvi Kivity 383062152b8aSAvi Kivity static void memory_map_init(void) 383162152b8aSAvi Kivity { 38327267c094SAnthony Liguori system_memory = g_malloc(sizeof(*system_memory)); 38338417cebfSAvi Kivity memory_region_init(system_memory, "system", INT64_MAX); 383462152b8aSAvi Kivity set_system_memory_map(system_memory); 3835309cb471SAvi Kivity 38367267c094SAnthony Liguori system_io = g_malloc(sizeof(*system_io)); 3837309cb471SAvi Kivity 
memory_region_init(system_io, "io", 65536); 3838309cb471SAvi Kivity set_system_io_map(system_io); 383962152b8aSAvi Kivity } 384062152b8aSAvi Kivity 384162152b8aSAvi Kivity MemoryRegion *get_system_memory(void) 384262152b8aSAvi Kivity { 384362152b8aSAvi Kivity return system_memory; 384462152b8aSAvi Kivity } 384562152b8aSAvi Kivity 3846309cb471SAvi Kivity MemoryRegion *get_system_io(void) 3847309cb471SAvi Kivity { 3848309cb471SAvi Kivity return system_io; 3849309cb471SAvi Kivity } 3850309cb471SAvi Kivity 3851e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */ 3852e2eef170Spbrook 385313eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */ 385413eb76e0Sbellard #if defined(CONFIG_USER_ONLY) 3855a68fe89cSPaul Brook int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 3856a68fe89cSPaul Brook uint8_t *buf, int len, int is_write) 385713eb76e0Sbellard { 385813eb76e0Sbellard int l, flags; 385913eb76e0Sbellard target_ulong page; 386053a5960aSpbrook void * p; 386113eb76e0Sbellard 386213eb76e0Sbellard while (len > 0) { 386313eb76e0Sbellard page = addr & TARGET_PAGE_MASK; 386413eb76e0Sbellard l = (page + TARGET_PAGE_SIZE) - addr; 386513eb76e0Sbellard if (l > len) 386613eb76e0Sbellard l = len; 386713eb76e0Sbellard flags = page_get_flags(page); 386813eb76e0Sbellard if (!(flags & PAGE_VALID)) 3869a68fe89cSPaul Brook return -1; 387013eb76e0Sbellard if (is_write) { 387113eb76e0Sbellard if (!(flags & PAGE_WRITE)) 3872a68fe89cSPaul Brook return -1; 3873579a97f7Sbellard /* XXX: this code should not depend on lock_user */ 387472fb7daaSaurel32 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) 3875a68fe89cSPaul Brook return -1; 387672fb7daaSaurel32 memcpy(p, buf, l); 387772fb7daaSaurel32 unlock_user(p, addr, l); 387813eb76e0Sbellard } else { 387913eb76e0Sbellard if (!(flags & PAGE_READ)) 3880a68fe89cSPaul Brook return -1; 3881579a97f7Sbellard /* XXX: this code should not depend on lock_user */ 388272fb7daaSaurel32 if (!(p = lock_user(VERIFY_READ, addr, l, 1))) 3883a68fe89cSPaul Brook return -1; 388472fb7daaSaurel32 memcpy(buf, p, l); 38855b257578Saurel32 unlock_user(p, addr, 0); 388613eb76e0Sbellard } 388713eb76e0Sbellard len -= l; 388813eb76e0Sbellard buf += l; 388913eb76e0Sbellard addr += l; 389013eb76e0Sbellard } 3891a68fe89cSPaul Brook return 0; 389213eb76e0Sbellard } 38938df1cd07Sbellard 389413eb76e0Sbellard #else 3895c227f099SAnthony Liguori void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 389613eb76e0Sbellard int len, int is_write) 389713eb76e0Sbellard { 389813eb76e0Sbellard int l, io_index; 389913eb76e0Sbellard uint8_t *ptr; 390013eb76e0Sbellard uint32_t val; 3901c227f099SAnthony Liguori target_phys_addr_t page; 39028ca5692dSAnthony PERARD ram_addr_t pd; 390392e873b9Sbellard PhysPageDesc *p; 390413eb76e0Sbellard 390513eb76e0Sbellard while (len > 0) { 390613eb76e0Sbellard page = addr & TARGET_PAGE_MASK; 390713eb76e0Sbellard l = (page + TARGET_PAGE_SIZE) - addr; 390813eb76e0Sbellard if (l > len) 390913eb76e0Sbellard l = len; 391092e873b9Sbellard p = phys_page_find(page >> TARGET_PAGE_BITS); 391113eb76e0Sbellard if (!p) { 391213eb76e0Sbellard pd = IO_MEM_UNASSIGNED; 391313eb76e0Sbellard } else { 391413eb76e0Sbellard pd = p->phys_offset; 391513eb76e0Sbellard } 391613eb76e0Sbellard 391713eb76e0Sbellard if (is_write) { 39183a7d929eSbellard if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 3919c227f099SAnthony Liguori target_phys_addr_t addr1 = addr; 392013eb76e0Sbellard io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 39218da3ff18Spbrook if (p) 
39226c2934dbSaurel32 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 39236a00d601Sbellard /* XXX: could force cpu_single_env to NULL to avoid 39246a00d601Sbellard potential bugs */ 39256c2934dbSaurel32 if (l >= 4 && ((addr1 & 3) == 0)) { 39261c213d19Sbellard /* 32 bit write access */ 3927c27004ecSbellard val = ldl_p(buf); 39286c2934dbSaurel32 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val); 392913eb76e0Sbellard l = 4; 39306c2934dbSaurel32 } else if (l >= 2 && ((addr1 & 1) == 0)) { 39311c213d19Sbellard /* 16 bit write access */ 3932c27004ecSbellard val = lduw_p(buf); 39336c2934dbSaurel32 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val); 393413eb76e0Sbellard l = 2; 393513eb76e0Sbellard } else { 39361c213d19Sbellard /* 8 bit write access */ 3937c27004ecSbellard val = ldub_p(buf); 39386c2934dbSaurel32 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val); 393913eb76e0Sbellard l = 1; 394013eb76e0Sbellard } 394113eb76e0Sbellard } else { 39428ca5692dSAnthony PERARD ram_addr_t addr1; 3943b448f2f3Sbellard addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 394413eb76e0Sbellard /* RAM case */ 39455579c7f3Spbrook ptr = qemu_get_ram_ptr(addr1); 394613eb76e0Sbellard memcpy(ptr, buf, l); 39473a7d929eSbellard if (!cpu_physical_memory_is_dirty(addr1)) { 3948b448f2f3Sbellard /* invalidate code */ 3949b448f2f3Sbellard tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 3950b448f2f3Sbellard /* set dirty bit */ 3951f7c11b53SYoshiaki Tamura cpu_physical_memory_set_dirty_flags( 3952f7c11b53SYoshiaki Tamura addr1, (0xff & ~CODE_DIRTY_FLAG)); 395313eb76e0Sbellard } 3954050a0ddfSAnthony PERARD qemu_put_ram_ptr(ptr); 39553a7d929eSbellard } 395613eb76e0Sbellard } else { 39572a4188a3Sbellard if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 39582a4188a3Sbellard !(pd & IO_MEM_ROMD)) { 3959c227f099SAnthony Liguori target_phys_addr_t addr1 = addr; 396013eb76e0Sbellard /* I/O case */ 396113eb76e0Sbellard io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 39628da3ff18Spbrook if (p) 39636c2934dbSaurel32 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 39646c2934dbSaurel32 if (l >= 4 && ((addr1 & 3) == 0)) { 396513eb76e0Sbellard /* 32 bit read access */ 39666c2934dbSaurel32 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1); 3967c27004ecSbellard stl_p(buf, val); 396813eb76e0Sbellard l = 4; 39696c2934dbSaurel32 } else if (l >= 2 && ((addr1 & 1) == 0)) { 397013eb76e0Sbellard /* 16 bit read access */ 39716c2934dbSaurel32 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1); 3972c27004ecSbellard stw_p(buf, val); 397313eb76e0Sbellard l = 2; 397413eb76e0Sbellard } else { 39751c213d19Sbellard /* 8 bit read access */ 39766c2934dbSaurel32 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1); 3977c27004ecSbellard stb_p(buf, val); 397813eb76e0Sbellard l = 1; 397913eb76e0Sbellard } 398013eb76e0Sbellard } else { 398113eb76e0Sbellard /* RAM case */ 3982050a0ddfSAnthony PERARD ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); 3983050a0ddfSAnthony PERARD memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l); 3984050a0ddfSAnthony PERARD qemu_put_ram_ptr(ptr); 398513eb76e0Sbellard } 398613eb76e0Sbellard } 398713eb76e0Sbellard len -= l; 398813eb76e0Sbellard buf += l; 398913eb76e0Sbellard addr += l; 399013eb76e0Sbellard } 399113eb76e0Sbellard } 39928df1cd07Sbellard 3993d0ecd2aaSbellard /* used for ROM loading : can write in RAM and ROM */ 3994c227f099SAnthony Liguori void cpu_physical_memory_write_rom(target_phys_addr_t addr, 3995d0ecd2aaSbellard const 
uint8_t *buf, int len) 3996d0ecd2aaSbellard { 3997d0ecd2aaSbellard int l; 3998d0ecd2aaSbellard uint8_t *ptr; 3999c227f099SAnthony Liguori target_phys_addr_t page; 4000d0ecd2aaSbellard unsigned long pd; 4001d0ecd2aaSbellard PhysPageDesc *p; 4002d0ecd2aaSbellard 4003d0ecd2aaSbellard while (len > 0) { 4004d0ecd2aaSbellard page = addr & TARGET_PAGE_MASK; 4005d0ecd2aaSbellard l = (page + TARGET_PAGE_SIZE) - addr; 4006d0ecd2aaSbellard if (l > len) 4007d0ecd2aaSbellard l = len; 4008d0ecd2aaSbellard p = phys_page_find(page >> TARGET_PAGE_BITS); 4009d0ecd2aaSbellard if (!p) { 4010d0ecd2aaSbellard pd = IO_MEM_UNASSIGNED; 4011d0ecd2aaSbellard } else { 4012d0ecd2aaSbellard pd = p->phys_offset; 4013d0ecd2aaSbellard } 4014d0ecd2aaSbellard 4015d0ecd2aaSbellard if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM && 40162a4188a3Sbellard (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM && 40172a4188a3Sbellard !(pd & IO_MEM_ROMD)) { 4018d0ecd2aaSbellard /* do nothing */ 4019d0ecd2aaSbellard } else { 4020d0ecd2aaSbellard unsigned long addr1; 4021d0ecd2aaSbellard addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 4022d0ecd2aaSbellard /* ROM/RAM case */ 40235579c7f3Spbrook ptr = qemu_get_ram_ptr(addr1); 4024d0ecd2aaSbellard memcpy(ptr, buf, l); 4025050a0ddfSAnthony PERARD qemu_put_ram_ptr(ptr); 4026d0ecd2aaSbellard } 4027d0ecd2aaSbellard len -= l; 4028d0ecd2aaSbellard buf += l; 4029d0ecd2aaSbellard addr += l; 4030d0ecd2aaSbellard } 4031d0ecd2aaSbellard } 4032d0ecd2aaSbellard 40336d16c2f8Saliguori typedef struct { 40346d16c2f8Saliguori void *buffer; 4035c227f099SAnthony Liguori target_phys_addr_t addr; 4036c227f099SAnthony Liguori target_phys_addr_t len; 40376d16c2f8Saliguori } BounceBuffer; 40386d16c2f8Saliguori 40396d16c2f8Saliguori static BounceBuffer bounce; 40406d16c2f8Saliguori 4041ba223c29Saliguori typedef struct MapClient { 4042ba223c29Saliguori void *opaque; 4043ba223c29Saliguori void (*callback)(void *opaque); 404472cf2d4fSBlue Swirl QLIST_ENTRY(MapClient) link; 4045ba223c29Saliguori } MapClient; 4046ba223c29Saliguori 404772cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list 404872cf2d4fSBlue Swirl = QLIST_HEAD_INITIALIZER(map_client_list); 4049ba223c29Saliguori 4050ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) 4051ba223c29Saliguori { 40527267c094SAnthony Liguori MapClient *client = g_malloc(sizeof(*client)); 4053ba223c29Saliguori 4054ba223c29Saliguori client->opaque = opaque; 4055ba223c29Saliguori client->callback = callback; 405672cf2d4fSBlue Swirl QLIST_INSERT_HEAD(&map_client_list, client, link); 4057ba223c29Saliguori return client; 4058ba223c29Saliguori } 4059ba223c29Saliguori 4060ba223c29Saliguori void cpu_unregister_map_client(void *_client) 4061ba223c29Saliguori { 4062ba223c29Saliguori MapClient *client = (MapClient *)_client; 4063ba223c29Saliguori 406472cf2d4fSBlue Swirl QLIST_REMOVE(client, link); 40657267c094SAnthony Liguori g_free(client); 4066ba223c29Saliguori } 4067ba223c29Saliguori 4068ba223c29Saliguori static void cpu_notify_map_clients(void) 4069ba223c29Saliguori { 4070ba223c29Saliguori MapClient *client; 4071ba223c29Saliguori 407272cf2d4fSBlue Swirl while (!QLIST_EMPTY(&map_client_list)) { 407372cf2d4fSBlue Swirl client = QLIST_FIRST(&map_client_list); 4074ba223c29Saliguori client->callback(client->opaque); 407534d5e948SIsaku Yamahata cpu_unregister_map_client(client); 4076ba223c29Saliguori } 4077ba223c29Saliguori } 4078ba223c29Saliguori 40796d16c2f8Saliguori /* Map a physical memory region into a host virtual 
address. 40806d16c2f8Saliguori * May map a subset of the requested range, given by and returned in *plen. 40816d16c2f8Saliguori * May return NULL if resources needed to perform the mapping are exhausted. 40826d16c2f8Saliguori * Use only for reads OR writes - not for read-modify-write operations. 4083ba223c29Saliguori * Use cpu_register_map_client() to know when retrying the map operation is 4084ba223c29Saliguori * likely to succeed. 40856d16c2f8Saliguori */ 4086c227f099SAnthony Liguori void *cpu_physical_memory_map(target_phys_addr_t addr, 4087c227f099SAnthony Liguori target_phys_addr_t *plen, 40886d16c2f8Saliguori int is_write) 40896d16c2f8Saliguori { 4090c227f099SAnthony Liguori target_phys_addr_t len = *plen; 409138bee5dcSStefano Stabellini target_phys_addr_t todo = 0; 40926d16c2f8Saliguori int l; 4093c227f099SAnthony Liguori target_phys_addr_t page; 40946d16c2f8Saliguori unsigned long pd; 40956d16c2f8Saliguori PhysPageDesc *p; 4096f15fbc4bSAnthony PERARD ram_addr_t raddr = RAM_ADDR_MAX; 40978ab934f9SStefano Stabellini ram_addr_t rlen; 40988ab934f9SStefano Stabellini void *ret; 40996d16c2f8Saliguori 41006d16c2f8Saliguori while (len > 0) { 41016d16c2f8Saliguori page = addr & TARGET_PAGE_MASK; 41026d16c2f8Saliguori l = (page + TARGET_PAGE_SIZE) - addr; 41036d16c2f8Saliguori if (l > len) 41046d16c2f8Saliguori l = len; 41056d16c2f8Saliguori p = phys_page_find(page >> TARGET_PAGE_BITS); 41066d16c2f8Saliguori if (!p) { 41076d16c2f8Saliguori pd = IO_MEM_UNASSIGNED; 41086d16c2f8Saliguori } else { 41096d16c2f8Saliguori pd = p->phys_offset; 41106d16c2f8Saliguori } 41116d16c2f8Saliguori 41126d16c2f8Saliguori if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 411338bee5dcSStefano Stabellini if (todo || bounce.buffer) { 41146d16c2f8Saliguori break; 41156d16c2f8Saliguori } 41166d16c2f8Saliguori bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); 41176d16c2f8Saliguori bounce.addr = addr; 41186d16c2f8Saliguori bounce.len = l; 41196d16c2f8Saliguori if (!is_write) { 412054f7b4a3SStefan Weil cpu_physical_memory_read(addr, bounce.buffer, l); 41216d16c2f8Saliguori } 412238bee5dcSStefano Stabellini 412338bee5dcSStefano Stabellini *plen = l; 412438bee5dcSStefano Stabellini return bounce.buffer; 41256d16c2f8Saliguori } 41268ab934f9SStefano Stabellini if (!todo) { 41278ab934f9SStefano Stabellini raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 41288ab934f9SStefano Stabellini } 41296d16c2f8Saliguori 41306d16c2f8Saliguori len -= l; 41316d16c2f8Saliguori addr += l; 413238bee5dcSStefano Stabellini todo += l; 41336d16c2f8Saliguori } 41348ab934f9SStefano Stabellini rlen = todo; 41358ab934f9SStefano Stabellini ret = qemu_ram_ptr_length(raddr, &rlen); 41368ab934f9SStefano Stabellini *plen = rlen; 41378ab934f9SStefano Stabellini return ret; 41386d16c2f8Saliguori } 41396d16c2f8Saliguori 41406d16c2f8Saliguori /* Unmaps a memory region previously mapped by cpu_physical_memory_map(). 41416d16c2f8Saliguori * Will also mark the memory as dirty if is_write == 1. access_len gives 41426d16c2f8Saliguori * the amount of memory that was actually read or written by the caller. 
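 *
 * Minimal usage sketch (illustrative only; addr, size and the memset are
 * placeholders, not taken from this file):
 *
 *     target_phys_addr_t len = size;
 *     void *p = cpu_physical_memory_map(addr, &len, 1);
 *     if (p) {
 *         memset(p, 0, len);
 *         cpu_physical_memory_unmap(p, len, 1, len);
 *     }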
41436d16c2f8Saliguori */ 4144c227f099SAnthony Liguori void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, 4145c227f099SAnthony Liguori int is_write, target_phys_addr_t access_len) 41466d16c2f8Saliguori { 41476d16c2f8Saliguori if (buffer != bounce.buffer) { 41486d16c2f8Saliguori if (is_write) { 4149e890261fSMarcelo Tosatti ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer); 41506d16c2f8Saliguori while (access_len) { 41516d16c2f8Saliguori unsigned l; 41526d16c2f8Saliguori l = TARGET_PAGE_SIZE; 41536d16c2f8Saliguori if (l > access_len) 41546d16c2f8Saliguori l = access_len; 41556d16c2f8Saliguori if (!cpu_physical_memory_is_dirty(addr1)) { 41566d16c2f8Saliguori /* invalidate code */ 41576d16c2f8Saliguori tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 41586d16c2f8Saliguori /* set dirty bit */ 4159f7c11b53SYoshiaki Tamura cpu_physical_memory_set_dirty_flags( 4160f7c11b53SYoshiaki Tamura addr1, (0xff & ~CODE_DIRTY_FLAG)); 41616d16c2f8Saliguori } 41626d16c2f8Saliguori addr1 += l; 41636d16c2f8Saliguori access_len -= l; 41646d16c2f8Saliguori } 41656d16c2f8Saliguori } 4166868bb33fSJan Kiszka if (xen_enabled()) { 4167e41d7c69SJan Kiszka xen_invalidate_map_cache_entry(buffer); 4168050a0ddfSAnthony PERARD } 41696d16c2f8Saliguori return; 41706d16c2f8Saliguori } 41716d16c2f8Saliguori if (is_write) { 41726d16c2f8Saliguori cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len); 41736d16c2f8Saliguori } 4174f8a83245SHerve Poussineau qemu_vfree(bounce.buffer); 41756d16c2f8Saliguori bounce.buffer = NULL; 4176ba223c29Saliguori cpu_notify_map_clients(); 41776d16c2f8Saliguori } 4178d0ecd2aaSbellard 41798df1cd07Sbellard /* warning: addr must be aligned */ 41801e78bcc1SAlexander Graf static inline uint32_t ldl_phys_internal(target_phys_addr_t addr, 41811e78bcc1SAlexander Graf enum device_endian endian) 41828df1cd07Sbellard { 41838df1cd07Sbellard int io_index; 41848df1cd07Sbellard uint8_t *ptr; 41858df1cd07Sbellard uint32_t val; 41868df1cd07Sbellard unsigned long pd; 41878df1cd07Sbellard PhysPageDesc *p; 41888df1cd07Sbellard 41898df1cd07Sbellard p = phys_page_find(addr >> TARGET_PAGE_BITS); 41908df1cd07Sbellard if (!p) { 41918df1cd07Sbellard pd = IO_MEM_UNASSIGNED; 41928df1cd07Sbellard } else { 41938df1cd07Sbellard pd = p->phys_offset; 41948df1cd07Sbellard } 41958df1cd07Sbellard 41962a4188a3Sbellard if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 41972a4188a3Sbellard !(pd & IO_MEM_ROMD)) { 41988df1cd07Sbellard /* I/O case */ 41998df1cd07Sbellard io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 42008da3ff18Spbrook if (p) 42018da3ff18Spbrook addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 42028df1cd07Sbellard val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); 42031e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN) 42041e78bcc1SAlexander Graf if (endian == DEVICE_LITTLE_ENDIAN) { 42051e78bcc1SAlexander Graf val = bswap32(val); 42061e78bcc1SAlexander Graf } 42071e78bcc1SAlexander Graf #else 42081e78bcc1SAlexander Graf if (endian == DEVICE_BIG_ENDIAN) { 42091e78bcc1SAlexander Graf val = bswap32(val); 42101e78bcc1SAlexander Graf } 42111e78bcc1SAlexander Graf #endif 42128df1cd07Sbellard } else { 42138df1cd07Sbellard /* RAM case */ 42145579c7f3Spbrook ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 42158df1cd07Sbellard (addr & ~TARGET_PAGE_MASK); 42161e78bcc1SAlexander Graf switch (endian) { 42171e78bcc1SAlexander Graf case DEVICE_LITTLE_ENDIAN: 42181e78bcc1SAlexander Graf val = ldl_le_p(ptr); 42191e78bcc1SAlexander Graf break; 42201e78bcc1SAlexander 
Graf case DEVICE_BIG_ENDIAN: 42211e78bcc1SAlexander Graf val = ldl_be_p(ptr); 42221e78bcc1SAlexander Graf break; 42231e78bcc1SAlexander Graf default: 42248df1cd07Sbellard val = ldl_p(ptr); 42251e78bcc1SAlexander Graf break; 42261e78bcc1SAlexander Graf } 42278df1cd07Sbellard } 42288df1cd07Sbellard return val; 42298df1cd07Sbellard } 42308df1cd07Sbellard 42311e78bcc1SAlexander Graf uint32_t ldl_phys(target_phys_addr_t addr) 42321e78bcc1SAlexander Graf { 42331e78bcc1SAlexander Graf return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN); 42341e78bcc1SAlexander Graf } 42351e78bcc1SAlexander Graf 42361e78bcc1SAlexander Graf uint32_t ldl_le_phys(target_phys_addr_t addr) 42371e78bcc1SAlexander Graf { 42381e78bcc1SAlexander Graf return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN); 42391e78bcc1SAlexander Graf } 42401e78bcc1SAlexander Graf 42411e78bcc1SAlexander Graf uint32_t ldl_be_phys(target_phys_addr_t addr) 42421e78bcc1SAlexander Graf { 42431e78bcc1SAlexander Graf return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN); 42441e78bcc1SAlexander Graf } 42451e78bcc1SAlexander Graf 424684b7b8e7Sbellard /* warning: addr must be aligned */ 42471e78bcc1SAlexander Graf static inline uint64_t ldq_phys_internal(target_phys_addr_t addr, 42481e78bcc1SAlexander Graf enum device_endian endian) 424984b7b8e7Sbellard { 425084b7b8e7Sbellard int io_index; 425184b7b8e7Sbellard uint8_t *ptr; 425284b7b8e7Sbellard uint64_t val; 425384b7b8e7Sbellard unsigned long pd; 425484b7b8e7Sbellard PhysPageDesc *p; 425584b7b8e7Sbellard 425684b7b8e7Sbellard p = phys_page_find(addr >> TARGET_PAGE_BITS); 425784b7b8e7Sbellard if (!p) { 425884b7b8e7Sbellard pd = IO_MEM_UNASSIGNED; 425984b7b8e7Sbellard } else { 426084b7b8e7Sbellard pd = p->phys_offset; 426184b7b8e7Sbellard } 426284b7b8e7Sbellard 42632a4188a3Sbellard if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 42642a4188a3Sbellard !(pd & IO_MEM_ROMD)) { 426584b7b8e7Sbellard /* I/O case */ 426684b7b8e7Sbellard io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 42678da3ff18Spbrook if (p) 42688da3ff18Spbrook addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 42691e78bcc1SAlexander Graf 42701e78bcc1SAlexander Graf /* XXX This is broken when device endian != cpu endian. 
42711e78bcc1SAlexander Graf Fix and add "endian" variable check */ 427284b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN 427384b7b8e7Sbellard val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; 427484b7b8e7Sbellard val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4); 427584b7b8e7Sbellard #else 427684b7b8e7Sbellard val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); 427784b7b8e7Sbellard val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32; 427884b7b8e7Sbellard #endif 427984b7b8e7Sbellard } else { 428084b7b8e7Sbellard /* RAM case */ 42815579c7f3Spbrook ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 428284b7b8e7Sbellard (addr & ~TARGET_PAGE_MASK); 42831e78bcc1SAlexander Graf switch (endian) { 42841e78bcc1SAlexander Graf case DEVICE_LITTLE_ENDIAN: 42851e78bcc1SAlexander Graf val = ldq_le_p(ptr); 42861e78bcc1SAlexander Graf break; 42871e78bcc1SAlexander Graf case DEVICE_BIG_ENDIAN: 42881e78bcc1SAlexander Graf val = ldq_be_p(ptr); 42891e78bcc1SAlexander Graf break; 42901e78bcc1SAlexander Graf default: 429184b7b8e7Sbellard val = ldq_p(ptr); 42921e78bcc1SAlexander Graf break; 42931e78bcc1SAlexander Graf } 429484b7b8e7Sbellard } 429584b7b8e7Sbellard return val; 429684b7b8e7Sbellard } 429784b7b8e7Sbellard 42981e78bcc1SAlexander Graf uint64_t ldq_phys(target_phys_addr_t addr) 42991e78bcc1SAlexander Graf { 43001e78bcc1SAlexander Graf return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN); 43011e78bcc1SAlexander Graf } 43021e78bcc1SAlexander Graf 43031e78bcc1SAlexander Graf uint64_t ldq_le_phys(target_phys_addr_t addr) 43041e78bcc1SAlexander Graf { 43051e78bcc1SAlexander Graf return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN); 43061e78bcc1SAlexander Graf } 43071e78bcc1SAlexander Graf 43081e78bcc1SAlexander Graf uint64_t ldq_be_phys(target_phys_addr_t addr) 43091e78bcc1SAlexander Graf { 43101e78bcc1SAlexander Graf return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN); 43111e78bcc1SAlexander Graf } 43121e78bcc1SAlexander Graf 4313aab33094Sbellard /* XXX: optimize */ 4314c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr) 4315aab33094Sbellard { 4316aab33094Sbellard uint8_t val; 4317aab33094Sbellard cpu_physical_memory_read(addr, &val, 1); 4318aab33094Sbellard return val; 4319aab33094Sbellard } 4320aab33094Sbellard 4321733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */ 43221e78bcc1SAlexander Graf static inline uint32_t lduw_phys_internal(target_phys_addr_t addr, 43231e78bcc1SAlexander Graf enum device_endian endian) 4324aab33094Sbellard { 4325733f0b02SMichael S. Tsirkin int io_index; 4326733f0b02SMichael S. Tsirkin uint8_t *ptr; 4327733f0b02SMichael S. Tsirkin uint64_t val; 4328733f0b02SMichael S. Tsirkin unsigned long pd; 4329733f0b02SMichael S. Tsirkin PhysPageDesc *p; 4330733f0b02SMichael S. Tsirkin 4331733f0b02SMichael S. Tsirkin p = phys_page_find(addr >> TARGET_PAGE_BITS); 4332733f0b02SMichael S. Tsirkin if (!p) { 4333733f0b02SMichael S. Tsirkin pd = IO_MEM_UNASSIGNED; 4334733f0b02SMichael S. Tsirkin } else { 4335733f0b02SMichael S. Tsirkin pd = p->phys_offset; 4336733f0b02SMichael S. Tsirkin } 4337733f0b02SMichael S. Tsirkin 4338733f0b02SMichael S. Tsirkin if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 4339733f0b02SMichael S. Tsirkin !(pd & IO_MEM_ROMD)) { 4340733f0b02SMichael S. Tsirkin /* I/O case */ 4341733f0b02SMichael S. Tsirkin io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 4342733f0b02SMichael S. Tsirkin if (p) 4343733f0b02SMichael S. 
Tsirkin addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 4344733f0b02SMichael S. Tsirkin val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); 43451e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN) 43461e78bcc1SAlexander Graf if (endian == DEVICE_LITTLE_ENDIAN) { 43471e78bcc1SAlexander Graf val = bswap16(val); 43481e78bcc1SAlexander Graf } 43491e78bcc1SAlexander Graf #else 43501e78bcc1SAlexander Graf if (endian == DEVICE_BIG_ENDIAN) { 43511e78bcc1SAlexander Graf val = bswap16(val); 43521e78bcc1SAlexander Graf } 43531e78bcc1SAlexander Graf #endif 4354733f0b02SMichael S. Tsirkin } else { 4355733f0b02SMichael S. Tsirkin /* RAM case */ 4356733f0b02SMichael S. Tsirkin ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 4357733f0b02SMichael S. Tsirkin (addr & ~TARGET_PAGE_MASK); 43581e78bcc1SAlexander Graf switch (endian) { 43591e78bcc1SAlexander Graf case DEVICE_LITTLE_ENDIAN: 43601e78bcc1SAlexander Graf val = lduw_le_p(ptr); 43611e78bcc1SAlexander Graf break; 43621e78bcc1SAlexander Graf case DEVICE_BIG_ENDIAN: 43631e78bcc1SAlexander Graf val = lduw_be_p(ptr); 43641e78bcc1SAlexander Graf break; 43651e78bcc1SAlexander Graf default: 4366733f0b02SMichael S. Tsirkin val = lduw_p(ptr); 43671e78bcc1SAlexander Graf break; 43681e78bcc1SAlexander Graf } 4369733f0b02SMichael S. Tsirkin } 4370733f0b02SMichael S. Tsirkin return val; 4371aab33094Sbellard } 4372aab33094Sbellard 43731e78bcc1SAlexander Graf uint32_t lduw_phys(target_phys_addr_t addr) 43741e78bcc1SAlexander Graf { 43751e78bcc1SAlexander Graf return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN); 43761e78bcc1SAlexander Graf } 43771e78bcc1SAlexander Graf 43781e78bcc1SAlexander Graf uint32_t lduw_le_phys(target_phys_addr_t addr) 43791e78bcc1SAlexander Graf { 43801e78bcc1SAlexander Graf return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN); 43811e78bcc1SAlexander Graf } 43821e78bcc1SAlexander Graf 43831e78bcc1SAlexander Graf uint32_t lduw_be_phys(target_phys_addr_t addr) 43841e78bcc1SAlexander Graf { 43851e78bcc1SAlexander Graf return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN); 43861e78bcc1SAlexander Graf } 43871e78bcc1SAlexander Graf 43888df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty 43898df1cd07Sbellard and the code inside is not invalidated. 
It is useful if the dirty 43908df1cd07Sbellard bits are used to track modified PTEs */ 4391c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) 43928df1cd07Sbellard { 43938df1cd07Sbellard int io_index; 43948df1cd07Sbellard uint8_t *ptr; 43958df1cd07Sbellard unsigned long pd; 43968df1cd07Sbellard PhysPageDesc *p; 43978df1cd07Sbellard 43988df1cd07Sbellard p = phys_page_find(addr >> TARGET_PAGE_BITS); 43998df1cd07Sbellard if (!p) { 44008df1cd07Sbellard pd = IO_MEM_UNASSIGNED; 44018df1cd07Sbellard } else { 44028df1cd07Sbellard pd = p->phys_offset; 44038df1cd07Sbellard } 44048df1cd07Sbellard 44053a7d929eSbellard if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 44068df1cd07Sbellard io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 44078da3ff18Spbrook if (p) 44088da3ff18Spbrook addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 44098df1cd07Sbellard io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 44108df1cd07Sbellard } else { 441174576198Saliguori unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 44125579c7f3Spbrook ptr = qemu_get_ram_ptr(addr1); 44138df1cd07Sbellard stl_p(ptr, val); 441474576198Saliguori 441574576198Saliguori if (unlikely(in_migration)) { 441674576198Saliguori if (!cpu_physical_memory_is_dirty(addr1)) { 441774576198Saliguori /* invalidate code */ 441874576198Saliguori tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); 441974576198Saliguori /* set dirty bit */ 4420f7c11b53SYoshiaki Tamura cpu_physical_memory_set_dirty_flags( 4421f7c11b53SYoshiaki Tamura addr1, (0xff & ~CODE_DIRTY_FLAG)); 442274576198Saliguori } 442374576198Saliguori } 44248df1cd07Sbellard } 44258df1cd07Sbellard } 44268df1cd07Sbellard 4427c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val) 4428bc98a7efSj_mayer { 4429bc98a7efSj_mayer int io_index; 4430bc98a7efSj_mayer uint8_t *ptr; 4431bc98a7efSj_mayer unsigned long pd; 4432bc98a7efSj_mayer PhysPageDesc *p; 4433bc98a7efSj_mayer 4434bc98a7efSj_mayer p = phys_page_find(addr >> TARGET_PAGE_BITS); 4435bc98a7efSj_mayer if (!p) { 4436bc98a7efSj_mayer pd = IO_MEM_UNASSIGNED; 4437bc98a7efSj_mayer } else { 4438bc98a7efSj_mayer pd = p->phys_offset; 4439bc98a7efSj_mayer } 4440bc98a7efSj_mayer 4441bc98a7efSj_mayer if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 4442bc98a7efSj_mayer io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 44438da3ff18Spbrook if (p) 44448da3ff18Spbrook addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 4445bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN 4446bc98a7efSj_mayer io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32); 4447bc98a7efSj_mayer io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val); 4448bc98a7efSj_mayer #else 4449bc98a7efSj_mayer io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 4450bc98a7efSj_mayer io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32); 4451bc98a7efSj_mayer #endif 4452bc98a7efSj_mayer } else { 44535579c7f3Spbrook ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 4454bc98a7efSj_mayer (addr & ~TARGET_PAGE_MASK); 4455bc98a7efSj_mayer stq_p(ptr, val); 4456bc98a7efSj_mayer } 4457bc98a7efSj_mayer } 4458bc98a7efSj_mayer 44598df1cd07Sbellard /* warning: addr must be aligned */ 44601e78bcc1SAlexander Graf static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val, 44611e78bcc1SAlexander Graf enum device_endian endian) 44628df1cd07Sbellard { 44638df1cd07Sbellard int io_index; 44648df1cd07Sbellard uint8_t *ptr; 44658df1cd07Sbellard 
unsigned long pd; 44668df1cd07Sbellard PhysPageDesc *p; 44678df1cd07Sbellard 44688df1cd07Sbellard p = phys_page_find(addr >> TARGET_PAGE_BITS); 44698df1cd07Sbellard if (!p) { 44708df1cd07Sbellard pd = IO_MEM_UNASSIGNED; 44718df1cd07Sbellard } else { 44728df1cd07Sbellard pd = p->phys_offset; 44738df1cd07Sbellard } 44748df1cd07Sbellard 44753a7d929eSbellard if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 44768df1cd07Sbellard io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 44778da3ff18Spbrook if (p) 44788da3ff18Spbrook addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 44791e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN) 44801e78bcc1SAlexander Graf if (endian == DEVICE_LITTLE_ENDIAN) { 44811e78bcc1SAlexander Graf val = bswap32(val); 44821e78bcc1SAlexander Graf } 44831e78bcc1SAlexander Graf #else 44841e78bcc1SAlexander Graf if (endian == DEVICE_BIG_ENDIAN) { 44851e78bcc1SAlexander Graf val = bswap32(val); 44861e78bcc1SAlexander Graf } 44871e78bcc1SAlexander Graf #endif 44888df1cd07Sbellard io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); 44898df1cd07Sbellard } else { 44908df1cd07Sbellard unsigned long addr1; 44918df1cd07Sbellard addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 44928df1cd07Sbellard /* RAM case */ 44935579c7f3Spbrook ptr = qemu_get_ram_ptr(addr1); 44941e78bcc1SAlexander Graf switch (endian) { 44951e78bcc1SAlexander Graf case DEVICE_LITTLE_ENDIAN: 44961e78bcc1SAlexander Graf stl_le_p(ptr, val); 44971e78bcc1SAlexander Graf break; 44981e78bcc1SAlexander Graf case DEVICE_BIG_ENDIAN: 44991e78bcc1SAlexander Graf stl_be_p(ptr, val); 45001e78bcc1SAlexander Graf break; 45011e78bcc1SAlexander Graf default: 45028df1cd07Sbellard stl_p(ptr, val); 45031e78bcc1SAlexander Graf break; 45041e78bcc1SAlexander Graf } 45053a7d929eSbellard if (!cpu_physical_memory_is_dirty(addr1)) { 45068df1cd07Sbellard /* invalidate code */ 45078df1cd07Sbellard tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); 45088df1cd07Sbellard /* set dirty bit */ 4509f7c11b53SYoshiaki Tamura cpu_physical_memory_set_dirty_flags(addr1, 4510f7c11b53SYoshiaki Tamura (0xff & ~CODE_DIRTY_FLAG)); 45118df1cd07Sbellard } 45128df1cd07Sbellard } 45133a7d929eSbellard } 45148df1cd07Sbellard 45151e78bcc1SAlexander Graf void stl_phys(target_phys_addr_t addr, uint32_t val) 45161e78bcc1SAlexander Graf { 45171e78bcc1SAlexander Graf stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); 45181e78bcc1SAlexander Graf } 45191e78bcc1SAlexander Graf 45201e78bcc1SAlexander Graf void stl_le_phys(target_phys_addr_t addr, uint32_t val) 45211e78bcc1SAlexander Graf { 45221e78bcc1SAlexander Graf stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); 45231e78bcc1SAlexander Graf } 45241e78bcc1SAlexander Graf 45251e78bcc1SAlexander Graf void stl_be_phys(target_phys_addr_t addr, uint32_t val) 45261e78bcc1SAlexander Graf { 45271e78bcc1SAlexander Graf stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN); 45281e78bcc1SAlexander Graf } 45291e78bcc1SAlexander Graf 4530aab33094Sbellard /* XXX: optimize */ 4531c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val) 4532aab33094Sbellard { 4533aab33094Sbellard uint8_t v = val; 4534aab33094Sbellard cpu_physical_memory_write(addr, &v, 1); 4535aab33094Sbellard } 4536aab33094Sbellard 4537733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */ 45381e78bcc1SAlexander Graf static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val, 45391e78bcc1SAlexander Graf enum device_endian endian) 4540aab33094Sbellard { 4541733f0b02SMichael S. 
Tsirkin int io_index; 4542733f0b02SMichael S. Tsirkin uint8_t *ptr; 4543733f0b02SMichael S. Tsirkin unsigned long pd; 4544733f0b02SMichael S. Tsirkin PhysPageDesc *p; 4545733f0b02SMichael S. Tsirkin 4546733f0b02SMichael S. Tsirkin p = phys_page_find(addr >> TARGET_PAGE_BITS); 4547733f0b02SMichael S. Tsirkin if (!p) { 4548733f0b02SMichael S. Tsirkin pd = IO_MEM_UNASSIGNED; 4549733f0b02SMichael S. Tsirkin } else { 4550733f0b02SMichael S. Tsirkin pd = p->phys_offset; 4551733f0b02SMichael S. Tsirkin } 4552733f0b02SMichael S. Tsirkin 4553733f0b02SMichael S. Tsirkin if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 4554733f0b02SMichael S. Tsirkin io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 4555733f0b02SMichael S. Tsirkin if (p) 4556733f0b02SMichael S. Tsirkin addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 45571e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN) 45581e78bcc1SAlexander Graf if (endian == DEVICE_LITTLE_ENDIAN) { 45591e78bcc1SAlexander Graf val = bswap16(val); 45601e78bcc1SAlexander Graf } 45611e78bcc1SAlexander Graf #else 45621e78bcc1SAlexander Graf if (endian == DEVICE_BIG_ENDIAN) { 45631e78bcc1SAlexander Graf val = bswap16(val); 45641e78bcc1SAlexander Graf } 45651e78bcc1SAlexander Graf #endif 4566733f0b02SMichael S. Tsirkin io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); 4567733f0b02SMichael S. Tsirkin } else { 4568733f0b02SMichael S. Tsirkin unsigned long addr1; 4569733f0b02SMichael S. Tsirkin addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 4570733f0b02SMichael S. Tsirkin /* RAM case */ 4571733f0b02SMichael S. Tsirkin ptr = qemu_get_ram_ptr(addr1); 45721e78bcc1SAlexander Graf switch (endian) { 45731e78bcc1SAlexander Graf case DEVICE_LITTLE_ENDIAN: 45741e78bcc1SAlexander Graf stw_le_p(ptr, val); 45751e78bcc1SAlexander Graf break; 45761e78bcc1SAlexander Graf case DEVICE_BIG_ENDIAN: 45771e78bcc1SAlexander Graf stw_be_p(ptr, val); 45781e78bcc1SAlexander Graf break; 45791e78bcc1SAlexander Graf default: 4580733f0b02SMichael S. Tsirkin stw_p(ptr, val); 45811e78bcc1SAlexander Graf break; 45821e78bcc1SAlexander Graf } 4583733f0b02SMichael S. Tsirkin if (!cpu_physical_memory_is_dirty(addr1)) { 4584733f0b02SMichael S. Tsirkin /* invalidate code */ 4585733f0b02SMichael S. Tsirkin tb_invalidate_phys_page_range(addr1, addr1 + 2, 0); 4586733f0b02SMichael S. Tsirkin /* set dirty bit */ 4587733f0b02SMichael S. Tsirkin cpu_physical_memory_set_dirty_flags(addr1, 4588733f0b02SMichael S. Tsirkin (0xff & ~CODE_DIRTY_FLAG)); 4589733f0b02SMichael S. Tsirkin } 4590733f0b02SMichael S. 
Tsirkin } 4591aab33094Sbellard } 4592aab33094Sbellard 45931e78bcc1SAlexander Graf void stw_phys(target_phys_addr_t addr, uint32_t val) 45941e78bcc1SAlexander Graf { 45951e78bcc1SAlexander Graf stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); 45961e78bcc1SAlexander Graf } 45971e78bcc1SAlexander Graf 45981e78bcc1SAlexander Graf void stw_le_phys(target_phys_addr_t addr, uint32_t val) 45991e78bcc1SAlexander Graf { 46001e78bcc1SAlexander Graf stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); 46011e78bcc1SAlexander Graf } 46021e78bcc1SAlexander Graf 46031e78bcc1SAlexander Graf void stw_be_phys(target_phys_addr_t addr, uint32_t val) 46041e78bcc1SAlexander Graf { 46051e78bcc1SAlexander Graf stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN); 46061e78bcc1SAlexander Graf } 46071e78bcc1SAlexander Graf 4608aab33094Sbellard /* XXX: optimize */ 4609c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val) 4610aab33094Sbellard { 4611aab33094Sbellard val = tswap64(val); 461271d2b725SStefan Weil cpu_physical_memory_write(addr, &val, 8); 4613aab33094Sbellard } 4614aab33094Sbellard 46151e78bcc1SAlexander Graf void stq_le_phys(target_phys_addr_t addr, uint64_t val) 46161e78bcc1SAlexander Graf { 46171e78bcc1SAlexander Graf val = cpu_to_le64(val); 46181e78bcc1SAlexander Graf cpu_physical_memory_write(addr, &val, 8); 46191e78bcc1SAlexander Graf } 46201e78bcc1SAlexander Graf 46211e78bcc1SAlexander Graf void stq_be_phys(target_phys_addr_t addr, uint64_t val) 46221e78bcc1SAlexander Graf { 46231e78bcc1SAlexander Graf val = cpu_to_be64(val); 46241e78bcc1SAlexander Graf cpu_physical_memory_write(addr, &val, 8); 46251e78bcc1SAlexander Graf } 46261e78bcc1SAlexander Graf 46275e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */ 4628b448f2f3Sbellard int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 4629b448f2f3Sbellard uint8_t *buf, int len, int is_write) 463013eb76e0Sbellard { 463113eb76e0Sbellard int l; 4632c227f099SAnthony Liguori target_phys_addr_t phys_addr; 46339b3c35e0Sj_mayer target_ulong page; 463413eb76e0Sbellard 463513eb76e0Sbellard while (len > 0) { 463613eb76e0Sbellard page = addr & TARGET_PAGE_MASK; 463713eb76e0Sbellard phys_addr = cpu_get_phys_page_debug(env, page); 463813eb76e0Sbellard /* if no physical page mapped, return an error */ 463913eb76e0Sbellard if (phys_addr == -1) 464013eb76e0Sbellard return -1; 464113eb76e0Sbellard l = (page + TARGET_PAGE_SIZE) - addr; 464213eb76e0Sbellard if (l > len) 464313eb76e0Sbellard l = len; 46445e2972fdSaliguori phys_addr += (addr & ~TARGET_PAGE_MASK); 46455e2972fdSaliguori if (is_write) 46465e2972fdSaliguori cpu_physical_memory_write_rom(phys_addr, buf, l); 46475e2972fdSaliguori else 46485e2972fdSaliguori cpu_physical_memory_rw(phys_addr, buf, l, is_write); 464913eb76e0Sbellard len -= l; 465013eb76e0Sbellard buf += l; 465113eb76e0Sbellard addr += l; 465213eb76e0Sbellard } 465313eb76e0Sbellard return 0; 465413eb76e0Sbellard } 4655a68fe89cSPaul Brook #endif 465613eb76e0Sbellard 46572e70f6efSpbrook /* in deterministic execution mode, instructions doing device I/Os 46582e70f6efSpbrook must be at the end of the TB */ 46592e70f6efSpbrook void cpu_io_recompile(CPUState *env, void *retaddr) 46602e70f6efSpbrook { 46612e70f6efSpbrook TranslationBlock *tb; 46622e70f6efSpbrook uint32_t n, cflags; 46632e70f6efSpbrook target_ulong pc, cs_base; 46642e70f6efSpbrook uint64_t flags; 46652e70f6efSpbrook 46662e70f6efSpbrook tb = tb_find_pc((unsigned long)retaddr); 46672e70f6efSpbrook if (!tb) { 46682e70f6efSpbrook cpu_abort(env, 
"cpu_io_recompile: could not find TB for pc=%p", 46692e70f6efSpbrook retaddr); 46702e70f6efSpbrook } 46712e70f6efSpbrook n = env->icount_decr.u16.low + tb->icount; 4672618ba8e6SStefan Weil cpu_restore_state(tb, env, (unsigned long)retaddr); 46732e70f6efSpbrook /* Calculate how many instructions had been executed before the fault 4674bf20dc07Sths occurred. */ 46752e70f6efSpbrook n = n - env->icount_decr.u16.low; 46762e70f6efSpbrook /* Generate a new TB ending on the I/O insn. */ 46772e70f6efSpbrook n++; 46782e70f6efSpbrook /* On MIPS and SH, delay slot instructions can only be restarted if 46792e70f6efSpbrook they were already the first instruction in the TB. If this is not 4680bf20dc07Sths the first instruction in a TB then re-execute the preceding 46812e70f6efSpbrook branch. */ 46822e70f6efSpbrook #if defined(TARGET_MIPS) 46832e70f6efSpbrook if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { 46842e70f6efSpbrook env->active_tc.PC -= 4; 46852e70f6efSpbrook env->icount_decr.u16.low++; 46862e70f6efSpbrook env->hflags &= ~MIPS_HFLAG_BMASK; 46872e70f6efSpbrook } 46882e70f6efSpbrook #elif defined(TARGET_SH4) 46892e70f6efSpbrook if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 46902e70f6efSpbrook && n > 1) { 46912e70f6efSpbrook env->pc -= 2; 46922e70f6efSpbrook env->icount_decr.u16.low++; 46932e70f6efSpbrook env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); 46942e70f6efSpbrook } 46952e70f6efSpbrook #endif 46962e70f6efSpbrook /* This should never happen. */ 46972e70f6efSpbrook if (n > CF_COUNT_MASK) 46982e70f6efSpbrook cpu_abort(env, "TB too big during recompile"); 46992e70f6efSpbrook 47002e70f6efSpbrook cflags = n | CF_LAST_IO; 47012e70f6efSpbrook pc = tb->pc; 47022e70f6efSpbrook cs_base = tb->cs_base; 47032e70f6efSpbrook flags = tb->flags; 47042e70f6efSpbrook tb_phys_invalidate(tb, -1); 47052e70f6efSpbrook /* FIXME: In theory this could raise an exception. In practice 47062e70f6efSpbrook we have already translated the block once so it's probably ok. */ 47072e70f6efSpbrook tb_gen_code(env, pc, cs_base, flags, cflags); 4708bf20dc07Sths /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not 47092e70f6efSpbrook the first in the TB) then we end up generating a whole new TB and 47102e70f6efSpbrook repeating the fault, which is horribly inefficient. 47112e70f6efSpbrook Better would be to execute just this insn uncached, or generate a 47122e70f6efSpbrook second new TB. 
*/ 47132e70f6efSpbrook cpu_resume_from_signal(env, NULL); 47142e70f6efSpbrook } 47152e70f6efSpbrook 4716b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY) 4717b3755a91SPaul Brook 4718055403b2SStefan Weil void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) 4719e3db7226Sbellard { 4720e3db7226Sbellard int i, target_code_size, max_target_code_size; 4721e3db7226Sbellard int direct_jmp_count, direct_jmp2_count, cross_page; 4722e3db7226Sbellard TranslationBlock *tb; 4723e3db7226Sbellard 4724e3db7226Sbellard target_code_size = 0; 4725e3db7226Sbellard max_target_code_size = 0; 4726e3db7226Sbellard cross_page = 0; 4727e3db7226Sbellard direct_jmp_count = 0; 4728e3db7226Sbellard direct_jmp2_count = 0; 4729e3db7226Sbellard for(i = 0; i < nb_tbs; i++) { 4730e3db7226Sbellard tb = &tbs[i]; 4731e3db7226Sbellard target_code_size += tb->size; 4732e3db7226Sbellard if (tb->size > max_target_code_size) 4733e3db7226Sbellard max_target_code_size = tb->size; 4734e3db7226Sbellard if (tb->page_addr[1] != -1) 4735e3db7226Sbellard cross_page++; 4736e3db7226Sbellard if (tb->tb_next_offset[0] != 0xffff) { 4737e3db7226Sbellard direct_jmp_count++; 4738e3db7226Sbellard if (tb->tb_next_offset[1] != 0xffff) { 4739e3db7226Sbellard direct_jmp2_count++; 4740e3db7226Sbellard } 4741e3db7226Sbellard } 4742e3db7226Sbellard } 4743e3db7226Sbellard /* XXX: avoid using doubles ? */ 474457fec1feSbellard cpu_fprintf(f, "Translation buffer state:\n"); 4745055403b2SStefan Weil cpu_fprintf(f, "gen code size %td/%ld\n", 474626a5f13bSbellard code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size); 474726a5f13bSbellard cpu_fprintf(f, "TB count %d/%d\n", 474826a5f13bSbellard nb_tbs, code_gen_max_blocks); 4749e3db7226Sbellard cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", 4750e3db7226Sbellard nb_tbs ? target_code_size / nb_tbs : 0, 4751e3db7226Sbellard max_target_code_size); 4752055403b2SStefan Weil cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n", 4753e3db7226Sbellard nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0, 4754e3db7226Sbellard target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0); 4755e3db7226Sbellard cpu_fprintf(f, "cross page TB count %d (%d%%)\n", 4756e3db7226Sbellard cross_page, 4757e3db7226Sbellard nb_tbs ? (cross_page * 100) / nb_tbs : 0); 4758e3db7226Sbellard cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", 4759e3db7226Sbellard direct_jmp_count, 4760e3db7226Sbellard nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0, 4761e3db7226Sbellard direct_jmp2_count, 4762e3db7226Sbellard nb_tbs ? 
(direct_jmp2_count * 100) / nb_tbs : 0);
476357fec1feSbellard cpu_fprintf(f, "\nStatistics:\n");
4764e3db7226Sbellard cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4765e3db7226Sbellard cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4766e3db7226Sbellard cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4767b67d9a52Sbellard tcg_dump_info(f, cpu_fprintf);
4768e3db7226Sbellard }
4769e3db7226Sbellard
477061382a50Sbellard #define MMUSUFFIX _cmmu
47713917149dSBlue Swirl #undef GETPC
477261382a50Sbellard #define GETPC() NULL
477361382a50Sbellard #define env cpu_single_env
4774b769d8feSbellard #define SOFTMMU_CODE_ACCESS
477561382a50Sbellard
477661382a50Sbellard #define SHIFT 0
477761382a50Sbellard #include "softmmu_template.h"
477861382a50Sbellard
477961382a50Sbellard #define SHIFT 1
478061382a50Sbellard #include "softmmu_template.h"
478161382a50Sbellard
478261382a50Sbellard #define SHIFT 2
478361382a50Sbellard #include "softmmu_template.h"
478461382a50Sbellard
478561382a50Sbellard #define SHIFT 3
478661382a50Sbellard #include "softmmu_template.h"
478761382a50Sbellard
478861382a50Sbellard #undef env
478961382a50Sbellard
479061382a50Sbellard #endif
4791
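/*
 * Illustrative sketch (not part of exec.c, assumes the usual QEMU headers of
 * this era): how a device model plugs into cpu_register_io_memory() above.
 * MyDevState, the mydev_* callbacks and the base address are hypothetical;
 * the registration pattern follows the contract documented at
 * cpu_register_io_memory_fixed().
 */

typedef struct MyDevState {
    uint32_t reg0;                        /* hypothetical 32-bit register */
} MyDevState;

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->reg0;                       /* same value for every offset */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->reg0 = val;
}

static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL, NULL, mydev_readl,              /* byte/word slots left unassigned */
};

static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_init(target_phys_addr_t base)
{
    MyDevState *s = g_malloc0(sizeof(*s));
    int io;

    /* Little-endian device: on a big-endian target the swapendian_*
       wrappers above are interposed automatically. */
    io = cpu_register_io_memory(mydev_read, mydev_write, s,
                                DEVICE_LITTLE_ENDIAN);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}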
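/*
 * A second illustrative fragment: touching guest-physical memory with the
 * fixed-endianness helpers defined above.  The descriptor layout and the
 * desc address are invented for the example.
 */
static void example_update_descriptor(target_phys_addr_t desc)
{
    uint32_t flags = ldl_le_phys(desc);          /* 32-bit little-endian load  */

    stl_le_phys(desc, flags | 0x1);              /* little-endian store back   */
    stw_be_phys(desc + 4, 0xbeef);               /* 16-bit big-endian store    */
    stq_phys(desc + 8, ldq_phys(desc + 8) + 1);  /* target-endian 64-bit r-m-w */
}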