/* xref: /qemu/system/physmem.c (revision ad0b5321f1f797274603ebbe20108b0750baee94) */
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#include "memory-internal.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

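/* Worked example of the level math above (illustrative, not from the
 * original source): with TARGET_PAGE_BITS == 12 and
 * L1_MAP_ADDR_SPACE_BITS == 32, the page index is 20 bits wide, so
 * V_L1_BITS_REM = 20 % 10 = 0; since that is < 4, the remainder is folded
 * into the top level and V_L1_BITS = 10, V_L1_SIZE = 1024, V_L1_SHIFT = 10:
 * one L1 level plus one bottom level of PageDescs.  With a 36-bit map
 * instead, V_L1_BITS_REM = 24 % 10 = 4, so V_L1_BITS = 4 and the top level
 * has only 16 entries above two full 10-bit levels.  The "< 4" test simply
 * avoids a degenerate, nearly-empty top level.
 */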
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

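/* Example of the rounding above (illustrative): with 4 KiB host pages,
 * map_exec((void *)0x12345, 0x100) rounds start down to 0x12000 and the
 * end (0x12445) up to 0x13000, so one whole page is made executable:
 * mprotect((void *)0x12000, 0x1000, PROT_READ | PROT_WRITE | PROT_EXEC).
 */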
static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

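/* Illustrative example of the maps parsing above: a line such as
 *   08048000-08056000 r-xp 00000000 03:01 12345   /bin/ls
 * is consumed by fscanf("%lx-%lx %*[^\n]\n", ...) as startaddr=0x08048000
 * and endaddr=0x08056000; page_set_flags() then marks that range as
 * PAGE_RESERVED so guest mappings will not be placed on top of it.
 */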
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

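#if 0   /* usage sketch (illustrative only, not part of the original file):
           look up the PageDesc covering a guest code address without
           allocating on a miss; 'addr' is a hypothetical page address */
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    PageDesc *p = page_find(index);   /* NULL if the page was never seen */
    if (p && p->first_tb) {
        /* at least one TB intersects this page */
    }
#endif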
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

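/* Illustrative example: phys_page_set(d, 0x100, 0x100, leaf) registers a
 * 256-page (1 MiB with 4 KiB pages) section starting at page index 0x100.
 * A run can only be stored as one leaf at level N if it is aligned to and
 * covers a whole (1 << (N * L2_BITS))-page block, so here the level-1 test
 * "(*index & 0x3ff) == 0 && *nb >= 0x400" fails and the recursion descends
 * to level 0, filling 256 individual leaf entries instead.
 */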
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, target_phys_addr_t index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change once a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}

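/* Worked example of the clamping (illustrative): in system mode on a
 * 32-bit ARM host with 1 GiB of guest RAM, size_code_gen_buffer(0) first
 * computes ram_size / 4 = 256 MiB, then clamps it to
 * MAX_CODE_GEN_BUFFER_SIZE (16 MiB on __arm__); a request of 512 KiB would
 * instead be raised to MIN_CODE_GEN_BUFFER_SIZE (1 MiB).
 */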
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

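/* Resulting buffer layout (illustrative sketch):
 *
 *   code_gen_buffer                                code_gen_prologue
 *   |<--------------- translated TBs ------------->|<--- 1024 B --->|
 *
 * New translation stops once code_gen_ptr passes code_gen_buffer_max_size,
 * which leaves TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of slack for the TB
 * currently being generated (see tb_alloc below).
 */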
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

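/* Note on the "& 3" tagging above and below (descriptive comment, added
 * for clarity): pointers stored in page_next[] and in the jmp_first /
 * jmp_next chains carry a tag in their low two bits.  For page lists the
 * tag (0 or 1) records which of the TB's up-to-two physical pages the link
 * belongs to; in the jump chains the value 2 marks the end of the circular
 * list (see the "tb | 2" fail-safe in tb_phys_invalidate).  Masking with
 * ~3 recovers the real, aligned TranslationBlock pointer.
 */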
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

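/* Worked example (illustrative): set_bits(tab, 3, 7) sets bits 3..9.
 * start and end straddle a byte boundary, so the first byte gets
 * mask 0xff << 3 = 0xf8 (bits 3-7), no full middle bytes follow, and the
 * final byte gets ~(0xff << (10 & 7)) = 0x03 (bits 8-9).
 */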
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

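/* Illustrative example of the two-page case: with 4 KiB target pages, a
 * TB whose pc sits at page offset 0xff0 with size 0x20 covers two pages.
 * On its first page (n == 0) the bits for [0xff0, 0x1000) are set; on the
 * second page (n == 1) the bits for [0, 0x10) are set.
 */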
10359349b4f9SAndreas Färber TranslationBlock *tb_gen_code(CPUArchState *env,
10362e70f6efSpbrook                               target_ulong pc, target_ulong cs_base,
10372e70f6efSpbrook                               int flags, int cflags)
1038d720b93dSbellard {
1039d720b93dSbellard     TranslationBlock *tb;
1040d720b93dSbellard     uint8_t *tc_ptr;
104141c1b1c9SPaul Brook     tb_page_addr_t phys_pc, phys_page2;
104241c1b1c9SPaul Brook     target_ulong virt_page2;
1043d720b93dSbellard     int code_gen_size;
1044d720b93dSbellard 
104541c1b1c9SPaul Brook     phys_pc = get_page_addr_code(env, pc);
1046c27004ecSbellard     tb = tb_alloc(pc);
1047d720b93dSbellard     if (!tb) {
1048d720b93dSbellard         /* flush must be done */
1049d720b93dSbellard         tb_flush(env);
1050d720b93dSbellard         /* cannot fail at this point */
1051c27004ecSbellard         tb = tb_alloc(pc);
10522e70f6efSpbrook         /* Don't forget to invalidate previous TB info.  */
10532e70f6efSpbrook         tb_invalidated_flag = 1;
1054d720b93dSbellard     }
1055d720b93dSbellard     tc_ptr = code_gen_ptr;
1056d720b93dSbellard     tb->tc_ptr = tc_ptr;
1057d720b93dSbellard     tb->cs_base = cs_base;
1058d720b93dSbellard     tb->flags = flags;
1059d720b93dSbellard     tb->cflags = cflags;
1060d07bde88Sblueswir1     cpu_gen_code(env, tb, &code_gen_size);
10618efe0ca8SStefan Weil     code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
10628efe0ca8SStefan Weil                              CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1063d720b93dSbellard 
1064d720b93dSbellard     /* check next page if needed */
1065c27004ecSbellard     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1066d720b93dSbellard     phys_page2 = -1;
1067c27004ecSbellard     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
106841c1b1c9SPaul Brook         phys_page2 = get_page_addr_code(env, virt_page2);
1069d720b93dSbellard     }
107041c1b1c9SPaul Brook     tb_link_page(tb, phys_pc, phys_page2);
10712e70f6efSpbrook     return tb;
1072d720b93dSbellard }
1073d720b93dSbellard 
107477a8f1a5SAlexander Graf /*
10758e0fdce3SJan Kiszka  * Invalidate all TBs which intersect with the target physical address range
10768e0fdce3SJan Kiszka  * [start;end[. NOTE: start and end may refer to *different* physical pages.
10778e0fdce3SJan Kiszka  * 'is_cpu_write_access' should be true if called from a real cpu write
10788e0fdce3SJan Kiszka  * access: the virtual CPU will exit the current TB if code is modified inside
10798e0fdce3SJan Kiszka  * this TB.
108077a8f1a5SAlexander Graf  */
108177a8f1a5SAlexander Graf void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
108277a8f1a5SAlexander Graf                               int is_cpu_write_access)
108377a8f1a5SAlexander Graf {
108477a8f1a5SAlexander Graf     while (start < end) {
108577a8f1a5SAlexander Graf         tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
108677a8f1a5SAlexander Graf         start &= TARGET_PAGE_MASK;
108777a8f1a5SAlexander Graf         start += TARGET_PAGE_SIZE;
108877a8f1a5SAlexander Graf     }
108977a8f1a5SAlexander Graf }
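
#if 0
/* Usage sketch with hypothetical values: a 0x20-byte DMA write that
   straddles a 4 KiB page boundary expands into one call per physical
   page.  The callee only examines the page containing 'start', so
   passing the full 'end' each time is harmless. */
tb_invalidate_phys_range(0x1ff0, 0x2010, 0);
/* becomes:
     tb_invalidate_phys_page_range(0x1ff0, 0x2010, 0);   page 0x1000
     tb_invalidate_phys_page_range(0x2000, 0x2010, 0);   page 0x2000 */
#endif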
109077a8f1a5SAlexander Graf 
10918e0fdce3SJan Kiszka /*
10928e0fdce3SJan Kiszka  * Invalidate all TBs which intersect with the target physical address range
10938e0fdce3SJan Kiszka  * [start, end). NOTE: start and end must refer to the *same* physical page.
10948e0fdce3SJan Kiszka  * 'is_cpu_write_access' should be true if called from a real cpu write
10958e0fdce3SJan Kiszka  * access: the virtual CPU will exit the current TB if code is modified inside
10968e0fdce3SJan Kiszka  * this TB.
10978e0fdce3SJan Kiszka  */
109841c1b1c9SPaul Brook void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1099d720b93dSbellard                                    int is_cpu_write_access)
11009fa3e853Sbellard {
11016b917547Saliguori     TranslationBlock *tb, *tb_next, *saved_tb;
11029349b4f9SAndreas Färber     CPUArchState *env = cpu_single_env;
110341c1b1c9SPaul Brook     tb_page_addr_t tb_start, tb_end;
11046b917547Saliguori     PageDesc *p;
11056b917547Saliguori     int n;
11066b917547Saliguori #ifdef TARGET_HAS_PRECISE_SMC
11076b917547Saliguori     int current_tb_not_found = is_cpu_write_access;
11086b917547Saliguori     TranslationBlock *current_tb = NULL;
11096b917547Saliguori     int current_tb_modified = 0;
11106b917547Saliguori     target_ulong current_pc = 0;
11116b917547Saliguori     target_ulong current_cs_base = 0;
11126b917547Saliguori     int current_flags = 0;
11136b917547Saliguori #endif /* TARGET_HAS_PRECISE_SMC */
11149fa3e853Sbellard 
11159fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
11169fa3e853Sbellard     if (!p)
11179fa3e853Sbellard         return;
11189fa3e853Sbellard     if (!p->code_bitmap &&
1119d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1120d720b93dSbellard         is_cpu_write_access) {
11219fa3e853Sbellard         /* build code bitmap */
11229fa3e853Sbellard         build_page_bitmap(p);
11239fa3e853Sbellard     }
11249fa3e853Sbellard 
11259fa3e853Sbellard     /* we remove all the TBs in the range [start, end) */
11269fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
11279fa3e853Sbellard     tb = p->first_tb;
11289fa3e853Sbellard     while (tb != NULL) {
11298efe0ca8SStefan Weil         n = (uintptr_t)tb & 3;
11308efe0ca8SStefan Weil         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
11319fa3e853Sbellard         tb_next = tb->page_next[n];
11329fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
11339fa3e853Sbellard         if (n == 0) {
11349fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
11359fa3e853Sbellard                it is not a problem */
11369fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
11379fa3e853Sbellard             tb_end = tb_start + tb->size;
11389fa3e853Sbellard         } else {
11399fa3e853Sbellard             tb_start = tb->page_addr[1];
11409fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
11419fa3e853Sbellard         }
11429fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
1143d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1144d720b93dSbellard             if (current_tb_not_found) {
1145d720b93dSbellard                 current_tb_not_found = 0;
1146d720b93dSbellard                 current_tb = NULL;
11472e70f6efSpbrook                 if (env->mem_io_pc) {
1148d720b93dSbellard                     /* now we have a real cpu fault */
11492e70f6efSpbrook                     current_tb = tb_find_pc(env->mem_io_pc);
1150d720b93dSbellard                 }
1151d720b93dSbellard             }
1152d720b93dSbellard             if (current_tb == tb &&
11532e70f6efSpbrook                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1154d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1155d720b93dSbellard                 its execution. We could be more precise by checking
1156d720b93dSbellard                 that the modification is after the current PC, but it
1157d720b93dSbellard                 would require a specialized function to partially
1158d720b93dSbellard                 restore the CPU state */
1159d720b93dSbellard 
1160d720b93dSbellard                 current_tb_modified = 1;
1161618ba8e6SStefan Weil                 cpu_restore_state(current_tb, env, env->mem_io_pc);
11626b917547Saliguori                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
11636b917547Saliguori                                      &current_flags);
1164d720b93dSbellard             }
1165d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
11666f5a9f7eSbellard             /* we need to do that to handle the case where a signal
11676f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
11686f5a9f7eSbellard             saved_tb = NULL;
11696f5a9f7eSbellard             if (env) {
1170ea1c1802Sbellard                 saved_tb = env->current_tb;
1171ea1c1802Sbellard                 env->current_tb = NULL;
11726f5a9f7eSbellard             }
11739fa3e853Sbellard             tb_phys_invalidate(tb, -1);
11746f5a9f7eSbellard             if (env) {
1175ea1c1802Sbellard                 env->current_tb = saved_tb;
1176ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
1177ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
11789fa3e853Sbellard             }
11796f5a9f7eSbellard         }
11809fa3e853Sbellard         tb = tb_next;
11819fa3e853Sbellard     }
11829fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
11839fa3e853Sbellard     /* if no code remains, there is no need to keep using slow writes */
11849fa3e853Sbellard     if (!p->first_tb) {
11859fa3e853Sbellard         invalidate_page_bitmap(p);
1186d720b93dSbellard         if (is_cpu_write_access) {
11872e70f6efSpbrook             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1188d720b93dSbellard         }
1189d720b93dSbellard     }
1190d720b93dSbellard #endif
1191d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1192d720b93dSbellard     if (current_tb_modified) {
1193d720b93dSbellard         /* we generate a block containing just the instruction that
1194d720b93dSbellard            modifies the memory; this ensures the block cannot modify
1195d720b93dSbellard            itself */
1196ea1c1802Sbellard         env->current_tb = NULL;
11972e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1198d720b93dSbellard         cpu_resume_from_signal(env, NULL);
11999fa3e853Sbellard     }
12009fa3e853Sbellard #endif
12019fa3e853Sbellard }
12029fa3e853Sbellard 
12039fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
120441c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
12059fa3e853Sbellard {
12069fa3e853Sbellard     PageDesc *p;
12079fa3e853Sbellard     int offset, b;
120859817ccbSbellard #if 0
1209a4193c8aSbellard     if (1) {
121093fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
12112e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1212a4193c8aSbellard                   cpu_single_env->eip,
12138efe0ca8SStefan Weil                   cpu_single_env->eip +
12148efe0ca8SStefan Weil                   (intptr_t)cpu_single_env->segs[R_CS].base);
1215a4193c8aSbellard     }
121659817ccbSbellard #endif
12179fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
12189fa3e853Sbellard     if (!p)
12199fa3e853Sbellard         return;
12209fa3e853Sbellard     if (p->code_bitmap) {
12219fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
12229fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
12239fa3e853Sbellard         if (b & ((1 << len) - 1))
12249fa3e853Sbellard             goto do_invalidate;
12259fa3e853Sbellard     } else {
12269fa3e853Sbellard     do_invalidate:
1227d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
12289fa3e853Sbellard     }
12299fa3e853Sbellard }
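
#if 0
/* Sketch of the bitmap probe above with made-up numbers: a 4-byte
   write at page offset 0x124 tests bits 0x124..0x127 of code_bitmap,
   one bit per byte of the page.  Because 'start' is len-aligned, the
   probed bits never straddle a byte of the bitmap.  'p' stands for
   the page's PageDesc. */
int offset = 0x124, len = 4;
int b = p->code_bitmap[offset >> 3] >> (offset & 7);
if (b & ((1 << len) - 1)) {
    /* at least one translated byte overlaps the write: invalidate */
}
#endif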
12309fa3e853Sbellard 
12319fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
123241c1b1c9SPaul Brook static void tb_invalidate_phys_page(tb_page_addr_t addr,
123320503968SBlue Swirl                                     uintptr_t pc, void *puc)
12349fa3e853Sbellard {
12356b917547Saliguori     TranslationBlock *tb;
12369fa3e853Sbellard     PageDesc *p;
12376b917547Saliguori     int n;
1238d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
12396b917547Saliguori     TranslationBlock *current_tb = NULL;
12409349b4f9SAndreas Färber     CPUArchState *env = cpu_single_env;
12416b917547Saliguori     int current_tb_modified = 0;
12426b917547Saliguori     target_ulong current_pc = 0;
12436b917547Saliguori     target_ulong current_cs_base = 0;
12446b917547Saliguori     int current_flags = 0;
1245d720b93dSbellard #endif
12469fa3e853Sbellard 
12479fa3e853Sbellard     addr &= TARGET_PAGE_MASK;
12489fa3e853Sbellard     p = page_find(addr >> TARGET_PAGE_BITS);
1249fd6ce8f6Sbellard     if (!p)
1250fd6ce8f6Sbellard         return;
1251fd6ce8f6Sbellard     tb = p->first_tb;
1252d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1253d720b93dSbellard     if (tb && pc != 0) {
1254d720b93dSbellard         current_tb = tb_find_pc(pc);
1255d720b93dSbellard     }
1256d720b93dSbellard #endif
1257fd6ce8f6Sbellard     while (tb != NULL) {
12588efe0ca8SStefan Weil         n = (uintptr_t)tb & 3;
12598efe0ca8SStefan Weil         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1260d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1261d720b93dSbellard         if (current_tb == tb &&
12622e70f6efSpbrook             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1263d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1264d720b93dSbellard                    its execution. We could be more precise by checking
1265d720b93dSbellard                    that the modification is after the current PC, but it
1266d720b93dSbellard                    would require a specialized function to partially
1267d720b93dSbellard                    restore the CPU state */
1268d720b93dSbellard 
1269d720b93dSbellard             current_tb_modified = 1;
1270618ba8e6SStefan Weil             cpu_restore_state(current_tb, env, pc);
12716b917547Saliguori             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
12726b917547Saliguori                                  &current_flags);
1273d720b93dSbellard         }
1274d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
12759fa3e853Sbellard         tb_phys_invalidate(tb, addr);
12769fa3e853Sbellard         tb = tb->page_next[n];
1277fd6ce8f6Sbellard     }
1278fd6ce8f6Sbellard     p->first_tb = NULL;
1279d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1280d720b93dSbellard     if (current_tb_modified) {
1281d720b93dSbellard         /* we generate a block containing just the instruction that
1282d720b93dSbellard            modifies the memory; this ensures the block cannot modify
1283d720b93dSbellard            itself */
1284ea1c1802Sbellard         env->current_tb = NULL;
12852e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1286d720b93dSbellard         cpu_resume_from_signal(env, puc);
1287d720b93dSbellard     }
1288d720b93dSbellard #endif
1289fd6ce8f6Sbellard }
12909fa3e853Sbellard #endif
1291fd6ce8f6Sbellard 
1292fd6ce8f6Sbellard /* add the tb in the target page and protect it if necessary */
12939fa3e853Sbellard static inline void tb_alloc_page(TranslationBlock *tb,
129441c1b1c9SPaul Brook                                  unsigned int n, tb_page_addr_t page_addr)
1295fd6ce8f6Sbellard {
1296fd6ce8f6Sbellard     PageDesc *p;
12974429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY
12984429ab44SJuan Quintela     bool page_already_protected;
12994429ab44SJuan Quintela #endif
13009fa3e853Sbellard 
13019fa3e853Sbellard     tb->page_addr[n] = page_addr;
13025cd2c5b6SRichard Henderson     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
13039fa3e853Sbellard     tb->page_next[n] = p->first_tb;
13044429ab44SJuan Quintela #ifndef CONFIG_USER_ONLY
13054429ab44SJuan Quintela     page_already_protected = p->first_tb != NULL;
13064429ab44SJuan Quintela #endif
13078efe0ca8SStefan Weil     p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
13089fa3e853Sbellard     invalidate_page_bitmap(p);
13099fa3e853Sbellard 
1310107db443Sbellard #if defined(TARGET_HAS_SMC) || 1
1311d720b93dSbellard 
13129fa3e853Sbellard #if defined(CONFIG_USER_ONLY)
13139fa3e853Sbellard     if (p->flags & PAGE_WRITE) {
131453a5960aSpbrook         target_ulong addr;
131553a5960aSpbrook         PageDesc *p2;
1316fd6ce8f6Sbellard         int prot;
1317fd6ce8f6Sbellard 
1318fd6ce8f6Sbellard         /* force the host page to be non-writable (writes will take a
1319fd6ce8f6Sbellard            page fault + mprotect overhead) */
132053a5960aSpbrook         page_addr &= qemu_host_page_mask;
1321fd6ce8f6Sbellard         prot = 0;
132253a5960aSpbrook         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
132353a5960aSpbrook             addr += TARGET_PAGE_SIZE) {
132453a5960aSpbrook 
132553a5960aSpbrook             p2 = page_find (addr >> TARGET_PAGE_BITS);
132653a5960aSpbrook             if (!p2)
132753a5960aSpbrook                 continue;
132853a5960aSpbrook             prot |= p2->flags;
132953a5960aSpbrook             p2->flags &= ~PAGE_WRITE;
133053a5960aSpbrook           }
133153a5960aSpbrook         mprotect(g2h(page_addr), qemu_host_page_size,
1332fd6ce8f6Sbellard                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1333fd6ce8f6Sbellard #ifdef DEBUG_TB_INVALIDATE
1334ab3d1727Sblueswir1         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
133553a5960aSpbrook                page_addr);
1336fd6ce8f6Sbellard #endif
1337fd6ce8f6Sbellard     }
13389fa3e853Sbellard #else
13399fa3e853Sbellard     /* if some code is already present, then the pages are already
13409fa3e853Sbellard        protected. So we handle the case where only the first TB is
13419fa3e853Sbellard        allocated in a physical page */
13424429ab44SJuan Quintela     if (!page_already_protected) {
13436a00d601Sbellard         tlb_protect_code(page_addr);
13449fa3e853Sbellard     }
13459fa3e853Sbellard #endif
1346d720b93dSbellard 
1347d720b93dSbellard #endif /* TARGET_HAS_SMC */
1348fd6ce8f6Sbellard }
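
#if 0
/* Sketch with hypothetical sizes: 64 KiB host pages and 4 KiB target
   pages.  Write-protecting the code page at guest address 0x13000
   must mprotect() the whole host page 0x10000..0x1ffff, which is why
   the loop above first ORs together the flags of every target page
   that host page contains. */
target_ulong page_addr = 0x13000 & qemu_host_page_mask;   /* 0x10000 */
mprotect(g2h(page_addr), qemu_host_page_size,             /* 64 KiB */
         (prot & PAGE_BITS) & ~PAGE_WRITE);
#endif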
1349fd6ce8f6Sbellard 
13509fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
13519fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
135241c1b1c9SPaul Brook void tb_link_page(TranslationBlock *tb,
135341c1b1c9SPaul Brook                   tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1354d4e8164fSbellard {
13559fa3e853Sbellard     unsigned int h;
13569fa3e853Sbellard     TranslationBlock **ptb;
13579fa3e853Sbellard 
1358c8a706feSpbrook     /* Grab the mmap lock to stop another thread invalidating this TB
1359c8a706feSpbrook        before we are done.  */
1360c8a706feSpbrook     mmap_lock();
13619fa3e853Sbellard     /* add in the physical hash table */
13629fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
13639fa3e853Sbellard     ptb = &tb_phys_hash[h];
13649fa3e853Sbellard     tb->phys_hash_next = *ptb;
13659fa3e853Sbellard     *ptb = tb;
1366fd6ce8f6Sbellard 
1367fd6ce8f6Sbellard     /* add in the page list */
13689fa3e853Sbellard     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
13699fa3e853Sbellard     if (phys_page2 != -1)
13709fa3e853Sbellard         tb_alloc_page(tb, 1, phys_page2);
13719fa3e853Sbellard     else
13729fa3e853Sbellard         tb->page_addr[1] = -1;
13739fa3e853Sbellard 
13748efe0ca8SStefan Weil     tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1375d4e8164fSbellard     tb->jmp_next[0] = NULL;
1376d4e8164fSbellard     tb->jmp_next[1] = NULL;
1377d4e8164fSbellard 
1378d4e8164fSbellard     /* init original jump addresses */
1379d4e8164fSbellard     if (tb->tb_next_offset[0] != 0xffff)
1380d4e8164fSbellard         tb_reset_jump(tb, 0);
1381d4e8164fSbellard     if (tb->tb_next_offset[1] != 0xffff)
1382d4e8164fSbellard         tb_reset_jump(tb, 1);
13838a40a180Sbellard 
13848a40a180Sbellard #ifdef DEBUG_TB_CHECK
13858a40a180Sbellard     tb_page_check();
13868a40a180Sbellard #endif
1387c8a706feSpbrook     mmap_unlock();
1388fd6ce8f6Sbellard }
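
#if 0
/* Sketch, not from the original source: the per-page TB lists tag the
   low two bits of each TranslationBlock pointer with the page index,
   so a single pointer records both the TB and which of its (at most
   two) pages the list entry belongs to.  'p' is a hypothetical
   PageDesc. */
TranslationBlock *entry = p->first_tb;
int n = (uintptr_t)entry & 3;                 /* page index: 0 or 1 */
TranslationBlock *real = (TranslationBlock *)((uintptr_t)entry & ~3);
TranslationBlock *next = real->page_next[n];  /* next TB on this page */
#endif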
1389fd6ce8f6Sbellard 
1390a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1391a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
13926375e09eSStefan Weil TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1393a513fe19Sbellard {
1394a513fe19Sbellard     int m_min, m_max, m;
13958efe0ca8SStefan Weil     uintptr_t v;
1396a513fe19Sbellard     TranslationBlock *tb;
1397a513fe19Sbellard 
1398a513fe19Sbellard     if (nb_tbs <= 0)
1399a513fe19Sbellard         return NULL;
14008efe0ca8SStefan Weil     if (tc_ptr < (uintptr_t)code_gen_buffer ||
14018efe0ca8SStefan Weil         tc_ptr >= (uintptr_t)code_gen_ptr) {
1402a513fe19Sbellard         return NULL;
14038efe0ca8SStefan Weil     }
1404a513fe19Sbellard     /* binary search (cf Knuth) */
1405a513fe19Sbellard     m_min = 0;
1406a513fe19Sbellard     m_max = nb_tbs - 1;
1407a513fe19Sbellard     while (m_min <= m_max) {
1408a513fe19Sbellard         m = (m_min + m_max) >> 1;
1409a513fe19Sbellard         tb = &tbs[m];
14108efe0ca8SStefan Weil         v = (uintptr_t)tb->tc_ptr;
1411a513fe19Sbellard         if (v == tc_ptr)
1412a513fe19Sbellard             return tb;
1413a513fe19Sbellard         else if (tc_ptr < v) {
1414a513fe19Sbellard             m_max = m - 1;
1415a513fe19Sbellard         } else {
1416a513fe19Sbellard             m_min = m + 1;
1417a513fe19Sbellard         }
1418a513fe19Sbellard     }
1419a513fe19Sbellard     return &tbs[m_max];
1420a513fe19Sbellard }
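
#if 0
/* Usage sketch: when a SIGSEGV is raised from generated code, the
   host PC taken from the signal frame locates the TB being executed.
   The binary search above is valid because tbs[] entries are handed
   out in increasing tc_ptr order.  'host_pc' and 'env' are
   hypothetical. */
TranslationBlock *tb = tb_find_pc(host_pc);
if (tb) {
    cpu_restore_state(tb, env, host_pc);   /* recover guest CPU state */
}
#endif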
14217501267eSbellard 
1422ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1423ea041c0eSbellard 
1424ea041c0eSbellard static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1425ea041c0eSbellard {
1426ea041c0eSbellard     TranslationBlock *tb1, *tb_next, **ptb;
1427ea041c0eSbellard     unsigned int n1;
1428ea041c0eSbellard 
1429ea041c0eSbellard     tb1 = tb->jmp_next[n];
1430ea041c0eSbellard     if (tb1 != NULL) {
1431ea041c0eSbellard         /* find head of list */
1432ea041c0eSbellard         for(;;) {
14338efe0ca8SStefan Weil             n1 = (uintptr_t)tb1 & 3;
14348efe0ca8SStefan Weil             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1435ea041c0eSbellard             if (n1 == 2)
1436ea041c0eSbellard                 break;
1437ea041c0eSbellard             tb1 = tb1->jmp_next[n1];
1438ea041c0eSbellard         }
1439ea041c0eSbellard         /* we are now sure that tb jumps to tb1 */
1440ea041c0eSbellard         tb_next = tb1;
1441ea041c0eSbellard 
1442ea041c0eSbellard         /* remove tb from the jmp_first list */
1443ea041c0eSbellard         ptb = &tb_next->jmp_first;
1444ea041c0eSbellard         for(;;) {
1445ea041c0eSbellard             tb1 = *ptb;
14468efe0ca8SStefan Weil             n1 = (uintptr_t)tb1 & 3;
14478efe0ca8SStefan Weil             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1448ea041c0eSbellard             if (n1 == n && tb1 == tb)
1449ea041c0eSbellard                 break;
1450ea041c0eSbellard             ptb = &tb1->jmp_next[n1];
1451ea041c0eSbellard         }
1452ea041c0eSbellard         *ptb = tb->jmp_next[n];
1453ea041c0eSbellard         tb->jmp_next[n] = NULL;
1454ea041c0eSbellard 
1455ea041c0eSbellard         /* suppress the jump to next tb in generated code */
1456ea041c0eSbellard         tb_reset_jump(tb, n);
1457ea041c0eSbellard 
14580124311eSbellard         /* suppress jumps in the tb we could have jumped to */
1459ea041c0eSbellard         tb_reset_jump_recursive(tb_next);
1460ea041c0eSbellard     }
1461ea041c0eSbellard }
1462ea041c0eSbellard 
1463ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb)
1464ea041c0eSbellard {
1465ea041c0eSbellard     tb_reset_jump_recursive2(tb, 0);
1466ea041c0eSbellard     tb_reset_jump_recursive2(tb, 1);
1467ea041c0eSbellard }
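
#if 0
/* Sketch of the jump list convention used above: jmp_first is a
   circular, tagged list of the TBs that chain into this one.  Tag
   bits 0/1 name the predecessor's outgoing jump slot; tag 2 marks the
   list head (the TB itself), which is how the loops detect the end.
   'tb' is hypothetical. */
TranslationBlock *tb1 = tb->jmp_first;
for (;;) {
    unsigned n1 = (uintptr_t)tb1 & 3;
    if (n1 == 2)
        break;                               /* back at the head */
    tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
    tb1 = tb1->jmp_next[n1];                 /* next predecessor */
}
#endif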
1468ea041c0eSbellard 
14691fddef4bSbellard #if defined(TARGET_HAS_ICE)
147094df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
14719349b4f9SAndreas Färber static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
147294df27fdSPaul Brook {
147394df27fdSPaul Brook     tb_invalidate_phys_page_range(pc, pc + 1, 0);
147494df27fdSPaul Brook }
147594df27fdSPaul Brook #else
14761e7855a5SMax Filippov void tb_invalidate_phys_addr(target_phys_addr_t addr)
1477d720b93dSbellard {
1478c227f099SAnthony Liguori     ram_addr_t ram_addr;
1479f3705d53SAvi Kivity     MemoryRegionSection *section;
1480d720b93dSbellard 
1481ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
1482f3705d53SAvi Kivity     if (!(memory_region_is_ram(section->mr)
1483f3705d53SAvi Kivity           || (section->mr->rom_device && section->mr->readable))) {
148406ef3525SAvi Kivity         return;
148506ef3525SAvi Kivity     }
1486f3705d53SAvi Kivity     ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1487cc5bea60SBlue Swirl         + memory_region_section_addr(section, addr);
1488706cd4b5Spbrook     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1489d720b93dSbellard }
14901e7855a5SMax Filippov 
14911e7855a5SMax Filippov static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
14921e7855a5SMax Filippov {
14939d70c4b7SMax Filippov     tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
14949d70c4b7SMax Filippov             (pc & ~TARGET_PAGE_MASK));
14951e7855a5SMax Filippov }
1496c27004ecSbellard #endif
149794df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
1498d720b93dSbellard 
1499c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
15009349b4f9SAndreas Färber void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1502c527ee8fSPaul Brook {
1503c527ee8fSPaul Brook }
1504c527ee8fSPaul Brook 
15059349b4f9SAndreas Färber int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1506c527ee8fSPaul Brook                           int flags, CPUWatchpoint **watchpoint)
1507c527ee8fSPaul Brook {
1508c527ee8fSPaul Brook     return -ENOSYS;
1509c527ee8fSPaul Brook }
1510c527ee8fSPaul Brook #else
15116658ffb8Spbrook /* Add a watchpoint.  */
15129349b4f9SAndreas Färber int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1513a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
15146658ffb8Spbrook {
1515b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1516c0ce998eSaliguori     CPUWatchpoint *wp;
15176658ffb8Spbrook 
1518b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
15190dc23828SMax Filippov     if ((len & (len - 1)) || (addr & ~len_mask) ||
15200dc23828SMax Filippov             len == 0 || len > TARGET_PAGE_SIZE) {
1521b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1522b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1523b4051334Saliguori         return -EINVAL;
1524b4051334Saliguori     }
15257267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
15266658ffb8Spbrook 
1527a1d1bb31Saliguori     wp->vaddr = addr;
1528b4051334Saliguori     wp->len_mask = len_mask;
1529a1d1bb31Saliguori     wp->flags = flags;
1530a1d1bb31Saliguori 
15312dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1532c0ce998eSaliguori     if (flags & BP_GDB)
153372cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1534c0ce998eSaliguori     else
153572cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1536a1d1bb31Saliguori 
15376658ffb8Spbrook     tlb_flush_page(env, addr);
1538a1d1bb31Saliguori 
1539a1d1bb31Saliguori     if (watchpoint)
1540a1d1bb31Saliguori         *watchpoint = wp;
1541a1d1bb31Saliguori     return 0;
15426658ffb8Spbrook }
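
#if 0
/* Usage sketch with hypothetical values: watch 4 bytes at 0x1000 for
   writes.  Per the sanity check above, len must be a power of two no
   larger than a page and addr must be len-aligned.  'env' is
   hypothetical. */
CPUWatchpoint *wp;
if (cpu_watchpoint_insert(env, 0x1000, 4,
                          BP_MEM_WRITE | BP_GDB, &wp) < 0) {
    /* bad length or alignment */
}
#endif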
15436658ffb8Spbrook 
1544a1d1bb31Saliguori /* Remove a specific watchpoint.  */
15459349b4f9SAndreas Färber int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
1546a1d1bb31Saliguori                           int flags)
15476658ffb8Spbrook {
1548b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1549a1d1bb31Saliguori     CPUWatchpoint *wp;
15506658ffb8Spbrook 
155172cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1552b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
15536e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1554a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
15556658ffb8Spbrook             return 0;
15566658ffb8Spbrook         }
15576658ffb8Spbrook     }
1558a1d1bb31Saliguori     return -ENOENT;
15596658ffb8Spbrook }
15606658ffb8Spbrook 
1561a1d1bb31Saliguori /* Remove a specific watchpoint by reference.  */
15629349b4f9SAndreas Färber void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
1563a1d1bb31Saliguori {
156472cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
15657d03f82fSedgar_igl 
1566a1d1bb31Saliguori     tlb_flush_page(env, watchpoint->vaddr);
1567a1d1bb31Saliguori 
15687267c094SAnthony Liguori     g_free(watchpoint);
15697d03f82fSedgar_igl }
15707d03f82fSedgar_igl 
1571a1d1bb31Saliguori /* Remove all matching watchpoints.  */
15729349b4f9SAndreas Färber void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1573a1d1bb31Saliguori {
1574c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1575a1d1bb31Saliguori 
157672cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1577a1d1bb31Saliguori         if (wp->flags & mask)
1578a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1579a1d1bb31Saliguori     }
1580c0ce998eSaliguori }
1581c527ee8fSPaul Brook #endif
1582a1d1bb31Saliguori 
1583a1d1bb31Saliguori /* Add a breakpoint.  */
15849349b4f9SAndreas Färber int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
1585a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
15864c3a88a2Sbellard {
15871fddef4bSbellard #if defined(TARGET_HAS_ICE)
1588c0ce998eSaliguori     CPUBreakpoint *bp;
15894c3a88a2Sbellard 
15907267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
15914c3a88a2Sbellard 
1592a1d1bb31Saliguori     bp->pc = pc;
1593a1d1bb31Saliguori     bp->flags = flags;
1594a1d1bb31Saliguori 
15952dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1596c0ce998eSaliguori     if (flags & BP_GDB)
159772cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1598c0ce998eSaliguori     else
159972cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1600d720b93dSbellard 
1601d720b93dSbellard     breakpoint_invalidate(env, pc);
1602a1d1bb31Saliguori 
1603a1d1bb31Saliguori     if (breakpoint)
1604a1d1bb31Saliguori         *breakpoint = bp;
16054c3a88a2Sbellard     return 0;
16064c3a88a2Sbellard #else
1607a1d1bb31Saliguori     return -ENOSYS;
16084c3a88a2Sbellard #endif
16094c3a88a2Sbellard }
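
#if 0
/* Usage sketch: debugger-originated breakpoints carry BP_GDB so they
   are queued ahead of target-initiated BP_CPU ones.  'env' and 'pc'
   are hypothetical. */
CPUBreakpoint *bp;
if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
    /* ... later, once the breakpoint has served its purpose ... */
    cpu_breakpoint_remove_by_ref(env, bp);
}
#endif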
16104c3a88a2Sbellard 
1611a1d1bb31Saliguori /* Remove a specific breakpoint.  */
16129349b4f9SAndreas Färber int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
1613a1d1bb31Saliguori {
16147d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1615a1d1bb31Saliguori     CPUBreakpoint *bp;
1616a1d1bb31Saliguori 
161772cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1618a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1619a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1620a1d1bb31Saliguori             return 0;
16217d03f82fSedgar_igl         }
1622a1d1bb31Saliguori     }
1623a1d1bb31Saliguori     return -ENOENT;
1624a1d1bb31Saliguori #else
1625a1d1bb31Saliguori     return -ENOSYS;
16267d03f82fSedgar_igl #endif
16277d03f82fSedgar_igl }
16287d03f82fSedgar_igl 
1629a1d1bb31Saliguori /* Remove a specific breakpoint by reference.  */
16309349b4f9SAndreas Färber void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
16314c3a88a2Sbellard {
16321fddef4bSbellard #if defined(TARGET_HAS_ICE)
163372cf2d4fSBlue Swirl     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1634d720b93dSbellard 
1635a1d1bb31Saliguori     breakpoint_invalidate(env, breakpoint->pc);
1636a1d1bb31Saliguori 
16377267c094SAnthony Liguori     g_free(breakpoint);
1638a1d1bb31Saliguori #endif
1639a1d1bb31Saliguori }
1640a1d1bb31Saliguori 
1641a1d1bb31Saliguori /* Remove all matching breakpoints. */
16429349b4f9SAndreas Färber void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
1643a1d1bb31Saliguori {
1644a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1645c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1646a1d1bb31Saliguori 
164772cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1648a1d1bb31Saliguori         if (bp->flags & mask)
1649a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1650c0ce998eSaliguori     }
16514c3a88a2Sbellard #endif
16524c3a88a2Sbellard }
16534c3a88a2Sbellard 
1654c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1655c33a346eSbellard    CPU loop after each instruction */
16569349b4f9SAndreas Färber void cpu_single_step(CPUArchState *env, int enabled)
1657c33a346eSbellard {
16581fddef4bSbellard #if defined(TARGET_HAS_ICE)
1659c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1660c33a346eSbellard         env->singlestep_enabled = enabled;
1661e22a25c9Saliguori         if (kvm_enabled())
1662e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1663e22a25c9Saliguori         else {
1664ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
16659fa3e853Sbellard             /* XXX: only flush what is necessary */
16660124311eSbellard             tb_flush(env);
1667c33a346eSbellard         }
1668e22a25c9Saliguori     }
1669c33a346eSbellard #endif
1670c33a346eSbellard }
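
#if 0
/* Usage sketch: a debugger stub toggles single-stepping around a
   resume.  Under TCG the toggle costs a full tb_flush(), after which
   newly generated TBs raise EXCP_DEBUG after every instruction.
   'env' is hypothetical; any nonzero value enables stepping. */
cpu_single_step(env, 1);
/* ... run until EXCP_DEBUG ... */
cpu_single_step(env, 0);
#endif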
1671c33a346eSbellard 
16729349b4f9SAndreas Färber static void cpu_unlink_tb(CPUArchState *env)
1673ea041c0eSbellard {
1674d5975363Spbrook     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1675d5975363Spbrook        problem and hope the cpu will stop of its own accord.  For userspace
1676d5975363Spbrook        emulation this often isn't actually as bad as it sounds.  Often
1677d5975363Spbrook        signals are used primarily to interrupt blocking syscalls.  */
16783098dba0Saurel32     TranslationBlock *tb;
1679c227f099SAnthony Liguori     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
16803098dba0Saurel32 
1681cab1b4bdSRiku Voipio     spin_lock(&interrupt_lock);
16823098dba0Saurel32     tb = env->current_tb;
16833098dba0Saurel32     /* if the cpu is currently executing code, we must unlink it and
16843098dba0Saurel32        all the potentially executing TB */
1685f76cfe56SRiku Voipio     if (tb) {
16863098dba0Saurel32         env->current_tb = NULL;
16873098dba0Saurel32         tb_reset_jump_recursive(tb);
16883098dba0Saurel32     }
1689cab1b4bdSRiku Voipio     spin_unlock(&interrupt_lock);
16903098dba0Saurel32 }
16913098dba0Saurel32 
169297ffbd8dSJan Kiszka #ifndef CONFIG_USER_ONLY
16933098dba0Saurel32 /* mask must never be zero, except for A20 change call */
16949349b4f9SAndreas Färber static void tcg_handle_interrupt(CPUArchState *env, int mask)
16953098dba0Saurel32 {
16963098dba0Saurel32     int old_mask;
16973098dba0Saurel32 
16983098dba0Saurel32     old_mask = env->interrupt_request;
16993098dba0Saurel32     env->interrupt_request |= mask;
17003098dba0Saurel32 
17018edac960Saliguori     /*
17028edac960Saliguori      * If called from iothread context, wake the target cpu in
17038edac960Saliguori      * case it's halted.
17048edac960Saliguori      */
1705b7680cb6SJan Kiszka     if (!qemu_cpu_is_self(env)) {
17068edac960Saliguori         qemu_cpu_kick(env);
17078edac960Saliguori         return;
17088edac960Saliguori     }
17098edac960Saliguori 
17102e70f6efSpbrook     if (use_icount) {
1711266910c4Spbrook         env->icount_decr.u16.high = 0xffff;
17122e70f6efSpbrook         if (!can_do_io(env)
1713be214e6cSaurel32             && (mask & ~old_mask) != 0) {
17142e70f6efSpbrook             cpu_abort(env, "Raised interrupt while not in I/O function");
17152e70f6efSpbrook         }
17162e70f6efSpbrook     } else {
17173098dba0Saurel32         cpu_unlink_tb(env);
1718ea041c0eSbellard     }
17192e70f6efSpbrook }
1720ea041c0eSbellard 
1721ec6959d0SJan Kiszka CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1722ec6959d0SJan Kiszka 
172397ffbd8dSJan Kiszka #else /* CONFIG_USER_ONLY */
172497ffbd8dSJan Kiszka 
17259349b4f9SAndreas Färber void cpu_interrupt(CPUArchState *env, int mask)
172697ffbd8dSJan Kiszka {
172797ffbd8dSJan Kiszka     env->interrupt_request |= mask;
172897ffbd8dSJan Kiszka     cpu_unlink_tb(env);
172997ffbd8dSJan Kiszka }
173097ffbd8dSJan Kiszka #endif /* CONFIG_USER_ONLY */
173197ffbd8dSJan Kiszka 
17329349b4f9SAndreas Färber void cpu_reset_interrupt(CPUArchState *env, int mask)
1733b54ad049Sbellard {
1734b54ad049Sbellard     env->interrupt_request &= ~mask;
1735b54ad049Sbellard }
1736b54ad049Sbellard 
17379349b4f9SAndreas Färber void cpu_exit(CPUArchState *env)
17383098dba0Saurel32 {
17393098dba0Saurel32     env->exit_request = 1;
17403098dba0Saurel32     cpu_unlink_tb(env);
17413098dba0Saurel32 }
17423098dba0Saurel32 
17439349b4f9SAndreas Färber void cpu_abort(CPUArchState *env, const char *fmt, ...)
17447501267eSbellard {
17457501267eSbellard     va_list ap;
1746493ae1f0Spbrook     va_list ap2;
17477501267eSbellard 
17487501267eSbellard     va_start(ap, fmt);
1749493ae1f0Spbrook     va_copy(ap2, ap);
17507501267eSbellard     fprintf(stderr, "qemu: fatal: ");
17517501267eSbellard     vfprintf(stderr, fmt, ap);
17527501267eSbellard     fprintf(stderr, "\n");
17536fd2a026SPeter Maydell     cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
175493fcfe39Saliguori     if (qemu_log_enabled()) {
175593fcfe39Saliguori         qemu_log("qemu: fatal: ");
175693fcfe39Saliguori         qemu_log_vprintf(fmt, ap2);
175793fcfe39Saliguori         qemu_log("\n");
17586fd2a026SPeter Maydell         log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
175931b1a7b4Saliguori         qemu_log_flush();
176093fcfe39Saliguori         qemu_log_close();
1761924edcaeSbalrog     }
1762493ae1f0Spbrook     va_end(ap2);
1763f9373291Sj_mayer     va_end(ap);
1764fd052bf6SRiku Voipio #if defined(CONFIG_USER_ONLY)
1765fd052bf6SRiku Voipio     {
1766fd052bf6SRiku Voipio         struct sigaction act;
1767fd052bf6SRiku Voipio         sigfillset(&act.sa_mask);
1768fd052bf6SRiku Voipio         act.sa_handler = SIG_DFL;
1769fd052bf6SRiku Voipio         sigaction(SIGABRT, &act, NULL);
1770fd052bf6SRiku Voipio     }
1771fd052bf6SRiku Voipio #endif
17727501267eSbellard     abort();
17737501267eSbellard }
17747501267eSbellard 
17759349b4f9SAndreas Färber CPUArchState *cpu_copy(CPUArchState *env)
1776c5be9f08Sths {
17779349b4f9SAndreas Färber     CPUArchState *new_env = cpu_init(env->cpu_model_str);
17789349b4f9SAndreas Färber     CPUArchState *next_cpu = new_env->next_cpu;
1779c5be9f08Sths     int cpu_index = new_env->cpu_index;
17805a38f081Saliguori #if defined(TARGET_HAS_ICE)
17815a38f081Saliguori     CPUBreakpoint *bp;
17825a38f081Saliguori     CPUWatchpoint *wp;
17835a38f081Saliguori #endif
17845a38f081Saliguori 
17859349b4f9SAndreas Färber     memcpy(new_env, env, sizeof(CPUArchState));
17865a38f081Saliguori 
17875a38f081Saliguori     /* Preserve chaining and index. */
1788c5be9f08Sths     new_env->next_cpu = next_cpu;
1789c5be9f08Sths     new_env->cpu_index = cpu_index;
17905a38f081Saliguori 
17915a38f081Saliguori     /* Clone all break/watchpoints.
17925a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
17935a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
179472cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
179572cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
17965a38f081Saliguori #if defined(TARGET_HAS_ICE)
179772cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
17985a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
17995a38f081Saliguori     }
180072cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
18015a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
18025a38f081Saliguori                               wp->flags, NULL);
18035a38f081Saliguori     }
18045a38f081Saliguori #endif
18055a38f081Saliguori 
1806c5be9f08Sths     return new_env;
1807c5be9f08Sths }
1808c5be9f08Sths 
18090124311eSbellard #if !defined(CONFIG_USER_ONLY)
18100cac1b66SBlue Swirl void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
18115c751e99Sedgar_igl {
18125c751e99Sedgar_igl     unsigned int i;
18135c751e99Sedgar_igl 
18145c751e99Sedgar_igl     /* Discard jump cache entries for any tb that might overlap the
18155c751e99Sedgar_igl        flushed page.  */
18165c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
18175c751e99Sedgar_igl     memset(&env->tb_jmp_cache[i], 0,
18185c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
18195c751e99Sedgar_igl 
18205c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
18215c751e99Sedgar_igl     memset(&env->tb_jmp_cache[i], 0,
18225c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
18235c751e99Sedgar_igl }
18245c751e99Sedgar_igl 
1825d24981d3SJuan Quintela static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1826d24981d3SJuan Quintela                                       uintptr_t length)
18271ccde1cbSbellard {
1828d24981d3SJuan Quintela     uintptr_t start1;
1829f23db169Sbellard 
18301ccde1cbSbellard     /* we modify the TLB cache so that the dirty bit will be set again
18311ccde1cbSbellard        when accessing the range */
18328efe0ca8SStefan Weil     start1 = (uintptr_t)qemu_safe_ram_ptr(start);
1833a57d23e4SStefan Weil     /* Check that we don't span multiple blocks - spanning would break
18345579c7f3Spbrook        the address comparisons below.  */
18358efe0ca8SStefan Weil     if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
18365579c7f3Spbrook             != (end - 1) - start) {
18375579c7f3Spbrook         abort();
18385579c7f3Spbrook     }
1839e5548617SBlue Swirl     cpu_tlb_reset_dirty_all(start1, length);
1840d24981d3SJuan Quintela 
1841d24981d3SJuan Quintela }
1842d24981d3SJuan Quintela 
1843d24981d3SJuan Quintela /* Note: start and end must be within the same ram block.  */
1844d24981d3SJuan Quintela void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1845d24981d3SJuan Quintela                                      int dirty_flags)
1846d24981d3SJuan Quintela {
1847d24981d3SJuan Quintela     uintptr_t length;
1848d24981d3SJuan Quintela 
1849d24981d3SJuan Quintela     start &= TARGET_PAGE_MASK;
1850d24981d3SJuan Quintela     end = TARGET_PAGE_ALIGN(end);
1851d24981d3SJuan Quintela 
1852d24981d3SJuan Quintela     length = end - start;
1853d24981d3SJuan Quintela     if (length == 0)
1854d24981d3SJuan Quintela         return;
1855d24981d3SJuan Quintela     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1856d24981d3SJuan Quintela 
1857d24981d3SJuan Quintela     if (tcg_enabled()) {
1858d24981d3SJuan Quintela         tlb_reset_dirty_range_all(start, end, length);
1859d24981d3SJuan Quintela     }
18601ccde1cbSbellard }
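
#if 0
/* Usage sketch: after sending one page during migration, its dirty
   bit is cleared so further guest writes can be tracked.  start and
   end must stay inside a single RAM block, as the helper above
   asserts.  'ram_addr' is hypothetical; the flag name is the one used
   elsewhere in this tree. */
cpu_physical_memory_reset_dirty(ram_addr, ram_addr + TARGET_PAGE_SIZE,
                                MIGRATION_DIRTY_FLAG);
#endif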
18611ccde1cbSbellard 
186274576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
186374576198Saliguori {
1864f6f3fbcaSMichael S. Tsirkin     int ret = 0;
186574576198Saliguori     in_migration = enable;
1866f6f3fbcaSMichael S. Tsirkin     return ret;
186774576198Saliguori }
186874576198Saliguori 
1869e5548617SBlue Swirl target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1870e5548617SBlue Swirl                                                    MemoryRegionSection *section,
1871e5548617SBlue Swirl                                                    target_ulong vaddr,
1872e5548617SBlue Swirl                                                    target_phys_addr_t paddr,
1873e5548617SBlue Swirl                                                    int prot,
1874e5548617SBlue Swirl                                                    target_ulong *address)
1875e5548617SBlue Swirl {
1876e5548617SBlue Swirl     target_phys_addr_t iotlb;
1877e5548617SBlue Swirl     CPUWatchpoint *wp;
1878e5548617SBlue Swirl 
1879cc5bea60SBlue Swirl     if (memory_region_is_ram(section->mr)) {
1880e5548617SBlue Swirl         /* Normal RAM.  */
1881e5548617SBlue Swirl         iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1882cc5bea60SBlue Swirl             + memory_region_section_addr(section, paddr);
1883e5548617SBlue Swirl         if (!section->readonly) {
1884e5548617SBlue Swirl             iotlb |= phys_section_notdirty;
1885e5548617SBlue Swirl         } else {
1886e5548617SBlue Swirl             iotlb |= phys_section_rom;
1887e5548617SBlue Swirl         }
1888e5548617SBlue Swirl     } else {
1889e5548617SBlue Swirl         /* IO handlers are currently passed a physical address.
1890e5548617SBlue Swirl            It would be nice to pass an offset from the base address
1891e5548617SBlue Swirl            of that region.  This would avoid having to special case RAM,
1892e5548617SBlue Swirl            and avoid full address decoding in every device.
1893e5548617SBlue Swirl            We can't use the high bits of pd for this because
1894e5548617SBlue Swirl            IO_MEM_ROMD uses these as a ram address.  */
1895e5548617SBlue Swirl         iotlb = section - phys_sections;
1896cc5bea60SBlue Swirl         iotlb += memory_region_section_addr(section, paddr);
1897e5548617SBlue Swirl     }
1898e5548617SBlue Swirl 
1899e5548617SBlue Swirl     /* Make accesses to pages with watchpoints go via the
1900e5548617SBlue Swirl        watchpoint trap routines.  */
1901e5548617SBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1902e5548617SBlue Swirl         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1903e5548617SBlue Swirl             /* Avoid trapping reads of pages with a write breakpoint. */
1904e5548617SBlue Swirl             if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1905e5548617SBlue Swirl                 iotlb = phys_section_watch + paddr;
1906e5548617SBlue Swirl                 *address |= TLB_MMIO;
1907e5548617SBlue Swirl                 break;
1908e5548617SBlue Swirl             }
1909e5548617SBlue Swirl         }
1910e5548617SBlue Swirl     }
1911e5548617SBlue Swirl 
1912e5548617SBlue Swirl     return iotlb;
1913e5548617SBlue Swirl }
1914e5548617SBlue Swirl 
19150124311eSbellard #else
1916edf8e2afSMika Westerberg /*
1917edf8e2afSMika Westerberg  * Walks guest process memory "regions" one by one
1918edf8e2afSMika Westerberg  * and calls callback function 'fn' for each region.
1919edf8e2afSMika Westerberg  */
19205cd2c5b6SRichard Henderson 
19215cd2c5b6SRichard Henderson struct walk_memory_regions_data
192233417e70Sbellard {
19235cd2c5b6SRichard Henderson     walk_memory_regions_fn fn;
19245cd2c5b6SRichard Henderson     void *priv;
19258efe0ca8SStefan Weil     uintptr_t start;
19265cd2c5b6SRichard Henderson     int prot;
19275cd2c5b6SRichard Henderson };
19289fa3e853Sbellard 
19295cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1930b480d9b7SPaul Brook                                    abi_ulong end, int new_prot)
19315cd2c5b6SRichard Henderson {
19325cd2c5b6SRichard Henderson     if (data->start != -1ul) {
19335cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
19345cd2c5b6SRichard Henderson         if (rc != 0) {
19355cd2c5b6SRichard Henderson             return rc;
19365cd2c5b6SRichard Henderson         }
19375cd2c5b6SRichard Henderson     }
1938edf8e2afSMika Westerberg 
19395cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
19405cd2c5b6SRichard Henderson     data->prot = new_prot;
19415cd2c5b6SRichard Henderson 
19425cd2c5b6SRichard Henderson     return 0;
194333417e70Sbellard }
19445cd2c5b6SRichard Henderson 
19455cd2c5b6SRichard Henderson static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1946b480d9b7SPaul Brook                                  abi_ulong base, int level, void **lp)
19475cd2c5b6SRichard Henderson {
1948b480d9b7SPaul Brook     abi_ulong pa;
19495cd2c5b6SRichard Henderson     int i, rc;
19505cd2c5b6SRichard Henderson 
19515cd2c5b6SRichard Henderson     if (*lp == NULL) {
19525cd2c5b6SRichard Henderson         return walk_memory_regions_end(data, base, 0);
19539fa3e853Sbellard     }
19545cd2c5b6SRichard Henderson 
19555cd2c5b6SRichard Henderson     if (level == 0) {
19565cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
19577296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
19585cd2c5b6SRichard Henderson             int prot = pd[i].flags;
19595cd2c5b6SRichard Henderson 
19605cd2c5b6SRichard Henderson             pa = base | (i << TARGET_PAGE_BITS);
19615cd2c5b6SRichard Henderson             if (prot != data->prot) {
19625cd2c5b6SRichard Henderson                 rc = walk_memory_regions_end(data, pa, prot);
19635cd2c5b6SRichard Henderson                 if (rc != 0) {
19645cd2c5b6SRichard Henderson                     return rc;
19659fa3e853Sbellard                 }
19669fa3e853Sbellard             }
19675cd2c5b6SRichard Henderson         }
19685cd2c5b6SRichard Henderson     } else {
19695cd2c5b6SRichard Henderson         void **pp = *lp;
19707296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
1971b480d9b7SPaul Brook             pa = base | ((abi_ulong)i <<
1972b480d9b7SPaul Brook                 (TARGET_PAGE_BITS + L2_BITS * level));
19735cd2c5b6SRichard Henderson             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
19745cd2c5b6SRichard Henderson             if (rc != 0) {
19755cd2c5b6SRichard Henderson                 return rc;
19765cd2c5b6SRichard Henderson             }
19775cd2c5b6SRichard Henderson         }
19785cd2c5b6SRichard Henderson     }
19795cd2c5b6SRichard Henderson 
19805cd2c5b6SRichard Henderson     return 0;
19815cd2c5b6SRichard Henderson }
19825cd2c5b6SRichard Henderson 
19835cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
19845cd2c5b6SRichard Henderson {
19855cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
19868efe0ca8SStefan Weil     uintptr_t i;
19875cd2c5b6SRichard Henderson 
19885cd2c5b6SRichard Henderson     data.fn = fn;
19895cd2c5b6SRichard Henderson     data.priv = priv;
19905cd2c5b6SRichard Henderson     data.start = -1ul;
19915cd2c5b6SRichard Henderson     data.prot = 0;
19925cd2c5b6SRichard Henderson 
19935cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
1994b480d9b7SPaul Brook         int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
19955cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
19965cd2c5b6SRichard Henderson         if (rc != 0) {
19975cd2c5b6SRichard Henderson             return rc;
19985cd2c5b6SRichard Henderson         }
19995cd2c5b6SRichard Henderson     }
20005cd2c5b6SRichard Henderson 
20015cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2002edf8e2afSMika Westerberg }
2003edf8e2afSMika Westerberg 
2004b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start,
2005b480d9b7SPaul Brook     abi_ulong end, unsigned long prot)
2006edf8e2afSMika Westerberg {
2007edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2008edf8e2afSMika Westerberg 
2009b480d9b7SPaul Brook     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2010b480d9b7SPaul Brook         " "TARGET_ABI_FMT_lx" %c%c%c\n",
2011edf8e2afSMika Westerberg         start, end, end - start,
2012edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2013edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2014edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2015edf8e2afSMika Westerberg 
2016edf8e2afSMika Westerberg     return (0);
2017edf8e2afSMika Westerberg }
2018edf8e2afSMika Westerberg 
2019edf8e2afSMika Westerberg /* dump memory mappings */
2020edf8e2afSMika Westerberg void page_dump(FILE *f)
2021edf8e2afSMika Westerberg {
2022edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2023edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2024edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
20259fa3e853Sbellard }
20269fa3e853Sbellard 
202753a5960aSpbrook int page_get_flags(target_ulong address)
20289fa3e853Sbellard {
20299fa3e853Sbellard     PageDesc *p;
20309fa3e853Sbellard 
20319fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
20329fa3e853Sbellard     if (!p)
20339fa3e853Sbellard         return 0;
20349fa3e853Sbellard     return p->flags;
20359fa3e853Sbellard }
20369fa3e853Sbellard 
2037376a7909SRichard Henderson /* Modify the flags of a page and invalidate the code if necessary.
2038376a7909SRichard Henderson    The flag PAGE_WRITE_ORG is set automatically depending
2039376a7909SRichard Henderson    on PAGE_WRITE.  The mmap_lock should already be held.  */
204053a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
20419fa3e853Sbellard {
2042376a7909SRichard Henderson     target_ulong addr, len;
20439fa3e853Sbellard 
2044376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2045376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2046376a7909SRichard Henderson        a missing call to h2g_valid.  */
2047b480d9b7SPaul Brook #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2048b480d9b7SPaul Brook     assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2049376a7909SRichard Henderson #endif
2050376a7909SRichard Henderson     assert(start < end);
2051376a7909SRichard Henderson 
20529fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
20539fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
2054376a7909SRichard Henderson 
2055376a7909SRichard Henderson     if (flags & PAGE_WRITE) {
20569fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
2057376a7909SRichard Henderson     }
2058376a7909SRichard Henderson 
2059376a7909SRichard Henderson     for (addr = start, len = end - start;
2060376a7909SRichard Henderson          len != 0;
2061376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2062376a7909SRichard Henderson         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2063376a7909SRichard Henderson 
2064376a7909SRichard Henderson         /* If the write protection bit is set, then we invalidate
2065376a7909SRichard Henderson            the code inside.  */
20669fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
20679fa3e853Sbellard             (flags & PAGE_WRITE) &&
20689fa3e853Sbellard             p->first_tb) {
2069d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
20709fa3e853Sbellard         }
20719fa3e853Sbellard         p->flags = flags;
20729fa3e853Sbellard     }
20739fa3e853Sbellard }
20749fa3e853Sbellard 
20753d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
20763d97b40bSths {
20773d97b40bSths     PageDesc *p;
20783d97b40bSths     target_ulong end;
20793d97b40bSths     target_ulong addr;
20803d97b40bSths 
2081376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2082376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2083376a7909SRichard Henderson        a missing call to h2g_valid.  */
2084338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2085338e9e6cSBlue Swirl     assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2086376a7909SRichard Henderson #endif
2087376a7909SRichard Henderson 
20883e0650a9SRichard Henderson     if (len == 0) {
20893e0650a9SRichard Henderson         return 0;
20903e0650a9SRichard Henderson     }
2091376a7909SRichard Henderson     if (start + len - 1 < start) {
2092376a7909SRichard Henderson         /* We've wrapped around.  */
209355f280c9Sbalrog         return -1;
2094376a7909SRichard Henderson     }
209555f280c9Sbalrog 
20963d97b40bSths     end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
20973d97b40bSths     start = start & TARGET_PAGE_MASK;
20983d97b40bSths 
2099376a7909SRichard Henderson     for (addr = start, len = end - start;
2100376a7909SRichard Henderson          len != 0;
2101376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
21023d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
21033d97b40bSths         if (!p)
21043d97b40bSths             return -1;
21053d97b40bSths         if (!(p->flags & PAGE_VALID))
21063d97b40bSths             return -1;
21073d97b40bSths 
2108dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
21093d97b40bSths             return -1;
2110dae3270cSbellard         if (flags & PAGE_WRITE) {
2111dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
21123d97b40bSths                 return -1;
2113dae3270cSbellard             /* unprotect the page if it was put read-only because it
2114dae3270cSbellard                contains translated code */
2115dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2116dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2117dae3270cSbellard                     return -1;
2118dae3270cSbellard             }
2119dae3270cSbellard             return 0;
2120dae3270cSbellard         }
21213d97b40bSths     }
21223d97b40bSths     return 0;
21233d97b40bSths }
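
#if 0
/* Usage sketch: user-mode syscall emulation validates a guest buffer
   before touching it; a failed check typically becomes an EFAULT-style
   error for the guest.  'guest_addr' and 'count' are hypothetical. */
if (page_check_range(guest_addr, count, PAGE_READ) < 0) {
    /* buffer not fully readable: fail the emulated syscall */
}
#endif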
21243d97b40bSths 
21259fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
2126ccbb4d44SStuart Brady    page. Return TRUE if the fault was successfully handled. */
21276375e09eSStefan Weil int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
21289fa3e853Sbellard {
212945d679d6SAurelien Jarno     unsigned int prot;
213045d679d6SAurelien Jarno     PageDesc *p;
213153a5960aSpbrook     target_ulong host_start, host_end, addr;
21329fa3e853Sbellard 
2133c8a706feSpbrook     /* Technically this isn't safe inside a signal handler.  However we
2134c8a706feSpbrook        know this only ever happens in a synchronous SEGV handler, so in
2135c8a706feSpbrook        practice it seems to be ok.  */
2136c8a706feSpbrook     mmap_lock();
2137c8a706feSpbrook 
213845d679d6SAurelien Jarno     p = page_find(address >> TARGET_PAGE_BITS);
213945d679d6SAurelien Jarno     if (!p) {
2140c8a706feSpbrook         mmap_unlock();
21419fa3e853Sbellard         return 0;
2142c8a706feSpbrook     }
214345d679d6SAurelien Jarno 
21449fa3e853Sbellard     /* if the page was really writable, then we change its
21459fa3e853Sbellard        protection back to writable */
214645d679d6SAurelien Jarno     if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
214745d679d6SAurelien Jarno         host_start = address & qemu_host_page_mask;
214845d679d6SAurelien Jarno         host_end = host_start + qemu_host_page_size;
214945d679d6SAurelien Jarno 
215045d679d6SAurelien Jarno         prot = 0;
215145d679d6SAurelien Jarno         for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
215245d679d6SAurelien Jarno             p = page_find(addr >> TARGET_PAGE_BITS);
215345d679d6SAurelien Jarno             p->flags |= PAGE_WRITE;
215445d679d6SAurelien Jarno             prot |= p->flags;
215545d679d6SAurelien Jarno 
21569fa3e853Sbellard             /* and since the content will be modified, we must invalidate
21579fa3e853Sbellard                the corresponding translated code. */
215845d679d6SAurelien Jarno             tb_invalidate_phys_page(addr, pc, puc);
21599fa3e853Sbellard #ifdef DEBUG_TB_CHECK
216045d679d6SAurelien Jarno             tb_invalidate_check(addr);
21619fa3e853Sbellard #endif
216245d679d6SAurelien Jarno         }
216345d679d6SAurelien Jarno         mprotect((void *)g2h(host_start), qemu_host_page_size,
216445d679d6SAurelien Jarno                  prot & PAGE_BITS);
216545d679d6SAurelien Jarno 
2166c8a706feSpbrook         mmap_unlock();
21679fa3e853Sbellard         return 1;
21689fa3e853Sbellard     }
2169c8a706feSpbrook     mmap_unlock();
21709fa3e853Sbellard     return 0;
21719fa3e853Sbellard }
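
/*
 * Illustrative sketch (not part of the original file): how a host SEGV
 * handler typically feeds write faults into page_unprotect().  The handler
 * shape, the h2g() conversion and the zero pc are simplifications; real
 * callers pass the faulting host pc so guest CPU state can be restored
 * precisely.
 */
#if 0
static void example_segv_handler(int sig, siginfo_t *info, void *puc)
{
    target_ulong guest_addr = h2g((uintptr_t)info->si_addr);

    /* Returns 1 when the page was read-only only because it holds
       translated code: the TBs are invalidated, PROT_WRITE is restored,
       and the faulting store can simply be retried.  */
    if (page_unprotect(guest_addr, 0, puc)) {
        return;
    }
    abort();    /* a genuine guest fault: must be delivered to the guest */
}
#endif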
21729fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
217333417e70Sbellard 
2174e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
21758da3ff18Spbrook 
2176c04b2b78SPaul Brook #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2177c04b2b78SPaul Brook typedef struct subpage_t {
217870c68e44SAvi Kivity     MemoryRegion iomem;
2179c04b2b78SPaul Brook     target_phys_addr_t base;
21805312bd8bSAvi Kivity     uint16_t sub_section[TARGET_PAGE_SIZE];
2181c04b2b78SPaul Brook } subpage_t;
2182c04b2b78SPaul Brook 
2183c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
21845312bd8bSAvi Kivity                              uint16_t section);
21850f0cb164SAvi Kivity static subpage_t *subpage_init(target_phys_addr_t base);
21865312bd8bSAvi Kivity static void destroy_page_desc(uint16_t section_index)
218754688b1eSAvi Kivity {
21885312bd8bSAvi Kivity     MemoryRegionSection *section = &phys_sections[section_index];
21895312bd8bSAvi Kivity     MemoryRegion *mr = section->mr;
219054688b1eSAvi Kivity 
219154688b1eSAvi Kivity     if (mr->subpage) {
219254688b1eSAvi Kivity         subpage_t *subpage = container_of(mr, subpage_t, iomem);
219354688b1eSAvi Kivity         memory_region_destroy(&subpage->iomem);
219454688b1eSAvi Kivity         g_free(subpage);
219554688b1eSAvi Kivity     }
219654688b1eSAvi Kivity }
219754688b1eSAvi Kivity 
21984346ae3eSAvi Kivity static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
219954688b1eSAvi Kivity {
220054688b1eSAvi Kivity     unsigned i;
2201d6f2ea22SAvi Kivity     PhysPageEntry *p;
220254688b1eSAvi Kivity 
2203c19e8800SAvi Kivity     if (lp->ptr == PHYS_MAP_NODE_NIL) {
220454688b1eSAvi Kivity         return;
220554688b1eSAvi Kivity     }
220654688b1eSAvi Kivity 
2207c19e8800SAvi Kivity     p = phys_map_nodes[lp->ptr];
220854688b1eSAvi Kivity     for (i = 0; i < L2_SIZE; ++i) {
220907f07b31SAvi Kivity         if (!p[i].is_leaf) {
221054688b1eSAvi Kivity             destroy_l2_mapping(&p[i], level - 1);
22114346ae3eSAvi Kivity         } else {
2212c19e8800SAvi Kivity             destroy_page_desc(p[i].ptr);
22134346ae3eSAvi Kivity         }
221454688b1eSAvi Kivity     }
221507f07b31SAvi Kivity     lp->is_leaf = 0;
2216c19e8800SAvi Kivity     lp->ptr = PHYS_MAP_NODE_NIL;
221754688b1eSAvi Kivity }
221854688b1eSAvi Kivity 
2219ac1970fbSAvi Kivity static void destroy_all_mappings(AddressSpaceDispatch *d)
222054688b1eSAvi Kivity {
2221ac1970fbSAvi Kivity     destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
2222d6f2ea22SAvi Kivity     phys_map_nodes_reset();
222354688b1eSAvi Kivity }
222454688b1eSAvi Kivity 
22255312bd8bSAvi Kivity static uint16_t phys_section_add(MemoryRegionSection *section)
22265312bd8bSAvi Kivity {
22275312bd8bSAvi Kivity     if (phys_sections_nb == phys_sections_nb_alloc) {
22285312bd8bSAvi Kivity         phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
22295312bd8bSAvi Kivity         phys_sections = g_renew(MemoryRegionSection, phys_sections,
22305312bd8bSAvi Kivity                                 phys_sections_nb_alloc);
22315312bd8bSAvi Kivity     }
22325312bd8bSAvi Kivity     phys_sections[phys_sections_nb] = *section;
22335312bd8bSAvi Kivity     return phys_sections_nb++;
22345312bd8bSAvi Kivity }
22355312bd8bSAvi Kivity 
22365312bd8bSAvi Kivity static void phys_sections_clear(void)
22375312bd8bSAvi Kivity {
22385312bd8bSAvi Kivity     phys_sections_nb = 0;
22395312bd8bSAvi Kivity }
22405312bd8bSAvi Kivity 
2241ac1970fbSAvi Kivity static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
22420f0cb164SAvi Kivity {
22430f0cb164SAvi Kivity     subpage_t *subpage;
22440f0cb164SAvi Kivity     target_phys_addr_t base = section->offset_within_address_space
22450f0cb164SAvi Kivity         & TARGET_PAGE_MASK;
2246ac1970fbSAvi Kivity     MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
22470f0cb164SAvi Kivity     MemoryRegionSection subsection = {
22480f0cb164SAvi Kivity         .offset_within_address_space = base,
22490f0cb164SAvi Kivity         .size = TARGET_PAGE_SIZE,
22500f0cb164SAvi Kivity     };
22510f0cb164SAvi Kivity     target_phys_addr_t start, end;
22520f0cb164SAvi Kivity 
2253f3705d53SAvi Kivity     assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
22540f0cb164SAvi Kivity 
2255f3705d53SAvi Kivity     if (!(existing->mr->subpage)) {
22560f0cb164SAvi Kivity         subpage = subpage_init(base);
22570f0cb164SAvi Kivity         subsection.mr = &subpage->iomem;
2258ac1970fbSAvi Kivity         phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
22592999097bSAvi Kivity                       phys_section_add(&subsection));
22600f0cb164SAvi Kivity     } else {
2261f3705d53SAvi Kivity         subpage = container_of(existing->mr, subpage_t, iomem);
22620f0cb164SAvi Kivity     }
22630f0cb164SAvi Kivity     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2264adb2a9b5STyler Hall     end = start + section->size - 1;
22650f0cb164SAvi Kivity     subpage_register(subpage, start, end, phys_section_add(section));
22660f0cb164SAvi Kivity }
22670f0cb164SAvi Kivity 
22680f0cb164SAvi Kivity 
2269ac1970fbSAvi Kivity static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
227033417e70Sbellard {
2271dd81124bSAvi Kivity     target_phys_addr_t start_addr = section->offset_within_address_space;
2272dd81124bSAvi Kivity     ram_addr_t size = section->size;
22732999097bSAvi Kivity     target_phys_addr_t addr;
22745312bd8bSAvi Kivity     uint16_t section_index = phys_section_add(section);
2275dd81124bSAvi Kivity 
22763b8e6a2dSEdgar E. Iglesias     assert(size);
2277f6f3fbcaSMichael S. Tsirkin 
22783b8e6a2dSEdgar E. Iglesias     addr = start_addr;
2279ac1970fbSAvi Kivity     phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
22802999097bSAvi Kivity                   section_index);
228133417e70Sbellard }
228233417e70Sbellard 
2283ac1970fbSAvi Kivity static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
22840f0cb164SAvi Kivity {
2285ac1970fbSAvi Kivity     AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
22860f0cb164SAvi Kivity     MemoryRegionSection now = *section, remain = *section;
22870f0cb164SAvi Kivity 
22880f0cb164SAvi Kivity     if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
22890f0cb164SAvi Kivity         || (now.size < TARGET_PAGE_SIZE)) {
22900f0cb164SAvi Kivity         now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
22910f0cb164SAvi Kivity                        - now.offset_within_address_space,
22920f0cb164SAvi Kivity                        now.size);
2293ac1970fbSAvi Kivity         register_subpage(d, &now);
22940f0cb164SAvi Kivity         remain.size -= now.size;
22950f0cb164SAvi Kivity         remain.offset_within_address_space += now.size;
22960f0cb164SAvi Kivity         remain.offset_within_region += now.size;
22970f0cb164SAvi Kivity     }
229869b67646STyler Hall     while (remain.size >= TARGET_PAGE_SIZE) {
22990f0cb164SAvi Kivity         now = remain;
230069b67646STyler Hall         if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
230169b67646STyler Hall             now.size = TARGET_PAGE_SIZE;
2302ac1970fbSAvi Kivity             register_subpage(d, &now);
230369b67646STyler Hall         } else {
23040f0cb164SAvi Kivity             now.size &= TARGET_PAGE_MASK;
2305ac1970fbSAvi Kivity             register_multipage(d, &now);
230669b67646STyler Hall         }
23070f0cb164SAvi Kivity         remain.size -= now.size;
23080f0cb164SAvi Kivity         remain.offset_within_address_space += now.size;
23090f0cb164SAvi Kivity         remain.offset_within_region += now.size;
23100f0cb164SAvi Kivity     }
23110f0cb164SAvi Kivity     now = remain;
23120f0cb164SAvi Kivity     if (now.size) {
2313ac1970fbSAvi Kivity         register_subpage(d, &now);
23140f0cb164SAvi Kivity     }
23150f0cb164SAvi Kivity }
23160f0cb164SAvi Kivity 
231762a2744cSSheng Yang void qemu_flush_coalesced_mmio_buffer(void)
231862a2744cSSheng Yang {
231962a2744cSSheng Yang     if (kvm_enabled())
232062a2744cSSheng Yang         kvm_flush_coalesced_mmio_buffer();
232162a2744cSSheng Yang }
232262a2744cSSheng Yang 
2323c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2324c902760fSMarcelo Tosatti 
2325c902760fSMarcelo Tosatti #include <sys/vfs.h>
2326c902760fSMarcelo Tosatti 
2327c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
2328c902760fSMarcelo Tosatti 
2329c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
2330c902760fSMarcelo Tosatti {
2331c902760fSMarcelo Tosatti     struct statfs fs;
2332c902760fSMarcelo Tosatti     int ret;
2333c902760fSMarcelo Tosatti 
2334c902760fSMarcelo Tosatti     do {
2335c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
2336c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
2337c902760fSMarcelo Tosatti 
2338c902760fSMarcelo Tosatti     if (ret != 0) {
23396adc0549SMichael Tokarev         perror(path);
2340c902760fSMarcelo Tosatti         return 0;
2341c902760fSMarcelo Tosatti     }
2342c902760fSMarcelo Tosatti 
2343c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
2344c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2345c902760fSMarcelo Tosatti 
2346c902760fSMarcelo Tosatti     return fs.f_bsize;
2347c902760fSMarcelo Tosatti }
2348c902760fSMarcelo Tosatti 
234904b16653SAlex Williamson static void *file_ram_alloc(RAMBlock *block,
235004b16653SAlex Williamson                             ram_addr_t memory,
235104b16653SAlex Williamson                             const char *path)
2352c902760fSMarcelo Tosatti {
2353c902760fSMarcelo Tosatti     char *filename;
2354c902760fSMarcelo Tosatti     void *area;
2355c902760fSMarcelo Tosatti     int fd;
2356c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2357c902760fSMarcelo Tosatti     int flags;
2358c902760fSMarcelo Tosatti #endif
2359c902760fSMarcelo Tosatti     unsigned long hpagesize;
2360c902760fSMarcelo Tosatti 
2361c902760fSMarcelo Tosatti     hpagesize = gethugepagesize(path);
2362c902760fSMarcelo Tosatti     if (!hpagesize) {
2363c902760fSMarcelo Tosatti         return NULL;
2364c902760fSMarcelo Tosatti     }
2365c902760fSMarcelo Tosatti 
2366c902760fSMarcelo Tosatti     if (memory < hpagesize) {
2367c902760fSMarcelo Tosatti         return NULL;
2368c902760fSMarcelo Tosatti     }
2369c902760fSMarcelo Tosatti 
2370c902760fSMarcelo Tosatti     if (kvm_enabled() && !kvm_has_sync_mmu()) {
2371c902760fSMarcelo Tosatti         fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2372c902760fSMarcelo Tosatti         return NULL;
2373c902760fSMarcelo Tosatti     }
2374c902760fSMarcelo Tosatti 
2375c902760fSMarcelo Tosatti     if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2376c902760fSMarcelo Tosatti         return NULL;
2377c902760fSMarcelo Tosatti     }
2378c902760fSMarcelo Tosatti 
2379c902760fSMarcelo Tosatti     fd = mkstemp(filename);
2380c902760fSMarcelo Tosatti     if (fd < 0) {
23816adc0549SMichael Tokarev         perror("unable to create backing store for hugepages");
2382c902760fSMarcelo Tosatti         free(filename);
2383c902760fSMarcelo Tosatti         return NULL;
2384c902760fSMarcelo Tosatti     }
2385c902760fSMarcelo Tosatti     unlink(filename);
2386c902760fSMarcelo Tosatti     free(filename);
2387c902760fSMarcelo Tosatti 
2388c902760fSMarcelo Tosatti     memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
2389c902760fSMarcelo Tosatti 
2390c902760fSMarcelo Tosatti     /*
2391c902760fSMarcelo Tosatti      * ftruncate is not supported by hugetlbfs in older
2392c902760fSMarcelo Tosatti      * hosts, so don't bother bailing out on errors.
2393c902760fSMarcelo Tosatti      * If anything goes wrong with it under other filesystems,
2394c902760fSMarcelo Tosatti      * mmap will fail.
2395c902760fSMarcelo Tosatti      */
2396c902760fSMarcelo Tosatti     if (ftruncate(fd, memory))
2397c902760fSMarcelo Tosatti         perror("ftruncate");
2398c902760fSMarcelo Tosatti 
2399c902760fSMarcelo Tosatti #ifdef MAP_POPULATE
2400c902760fSMarcelo Tosatti     /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2401c902760fSMarcelo Tosatti      * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2402c902760fSMarcelo Tosatti      * to sidestep this quirk.
2403c902760fSMarcelo Tosatti      */
2404c902760fSMarcelo Tosatti     flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2405c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2406c902760fSMarcelo Tosatti #else
2407c902760fSMarcelo Tosatti     area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2408c902760fSMarcelo Tosatti #endif
2409c902760fSMarcelo Tosatti     if (area == MAP_FAILED) {
2410c902760fSMarcelo Tosatti         perror("file_ram_alloc: can't mmap RAM pages");
2411c902760fSMarcelo Tosatti         close(fd);
2412c902760fSMarcelo Tosatti         return (NULL);
2413c902760fSMarcelo Tosatti     }
241404b16653SAlex Williamson     block->fd = fd;
2415c902760fSMarcelo Tosatti     return area;
2416c902760fSMarcelo Tosatti }
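
/*
 * Worked example of the hugepage rounding above (illustrative only): with
 * a 2 MiB hugetlbfs page size, a 5 MiB request is padded up to the next
 * multiple of the page size before ftruncate()/mmap():
 *
 *   hpagesize = 0x200000
 *   memory    = 0x500000
 *   (memory + hpagesize - 1) & ~(hpagesize - 1) == 0x600000
 */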
2417c902760fSMarcelo Tosatti #endif
2418c902760fSMarcelo Tosatti 
2419d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
2420d17b5288SAlex Williamson {
242104b16653SAlex Williamson     RAMBlock *block, *next_block;
24223e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
242304b16653SAlex Williamson 
242404b16653SAlex Williamson     if (QLIST_EMPTY(&ram_list.blocks))
242504b16653SAlex Williamson         return 0;
242604b16653SAlex Williamson 
242704b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2428f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
242904b16653SAlex Williamson 
243004b16653SAlex Williamson         end = block->offset + block->length;
243104b16653SAlex Williamson 
243204b16653SAlex Williamson         QLIST_FOREACH(next_block, &ram_list.blocks, next) {
243304b16653SAlex Williamson             if (next_block->offset >= end) {
243404b16653SAlex Williamson                 next = MIN(next, next_block->offset);
243504b16653SAlex Williamson             }
243604b16653SAlex Williamson         }
243704b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
243804b16653SAlex Williamson             offset = end;
243904b16653SAlex Williamson             mingap = next - end;
244004b16653SAlex Williamson         }
244104b16653SAlex Williamson     }
24423e837b2cSAlex Williamson 
24433e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
24443e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
24453e837b2cSAlex Williamson                 (uint64_t)size);
24463e837b2cSAlex Williamson         abort();
24473e837b2cSAlex Williamson     }
24483e837b2cSAlex Williamson 
244904b16653SAlex Williamson     return offset;
245004b16653SAlex Williamson }
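
/*
 * Illustrative example of the best-fit search above (not from the original
 * source): with existing blocks at [0x0, 0x100000) and [0x300000, 0x400000),
 * a request for 0x100000 bytes fits both in the 0x200000-byte hole starting
 * at 0x100000 and after the last block; the smallest gap that still fits
 * wins, so the new block lands at offset 0x100000.
 */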
245104b16653SAlex Williamson 
2452652d7ec2SJuan Quintela ram_addr_t last_ram_offset(void)
245304b16653SAlex Williamson {
2454d17b5288SAlex Williamson     RAMBlock *block;
2455d17b5288SAlex Williamson     ram_addr_t last = 0;
2456d17b5288SAlex Williamson 
2457d17b5288SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next)
2458d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
2459d17b5288SAlex Williamson 
2460d17b5288SAlex Williamson     return last;
2461d17b5288SAlex Williamson }
2462d17b5288SAlex Williamson 
2463ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2464ddb97f1dSJason Baron {
2465ddb97f1dSJason Baron     int ret;
2466ddb97f1dSJason Baron     QemuOpts *machine_opts;
2467ddb97f1dSJason Baron 
2468ddb97f1dSJason Baron     /* Use MADV_DONTDUMP if the user doesn't want guest memory in core dumps */
2469ddb97f1dSJason Baron     machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2470ddb97f1dSJason Baron     if (machine_opts &&
2471ddb97f1dSJason Baron         !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2472ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2473ddb97f1dSJason Baron         if (ret) {
2474ddb97f1dSJason Baron             perror("qemu_madvise");
2475ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2476ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
2477ddb97f1dSJason Baron         }
2478ddb97f1dSJason Baron     }
2479ddb97f1dSJason Baron }
2480ddb97f1dSJason Baron 
2481c5705a77SAvi Kivity void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
248284b89d78SCam Macdonell {
248384b89d78SCam Macdonell     RAMBlock *new_block, *block;
248484b89d78SCam Macdonell 
2485c5705a77SAvi Kivity     new_block = NULL;
2486c5705a77SAvi Kivity     QLIST_FOREACH(block, &ram_list.blocks, next) {
2487c5705a77SAvi Kivity         if (block->offset == addr) {
2488c5705a77SAvi Kivity             new_block = block;
2489c5705a77SAvi Kivity             break;
2490c5705a77SAvi Kivity         }
2491c5705a77SAvi Kivity     }
2492c5705a77SAvi Kivity     assert(new_block);
2493c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
249484b89d78SCam Macdonell 
249509e5ab63SAnthony Liguori     if (dev) {
249609e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
249784b89d78SCam Macdonell         if (id) {
249884b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
24997267c094SAnthony Liguori             g_free(id);
250084b89d78SCam Macdonell         }
250184b89d78SCam Macdonell     }
250284b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
250384b89d78SCam Macdonell 
250484b89d78SCam Macdonell     QLIST_FOREACH(block, &ram_list.blocks, next) {
2505c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
250684b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
250784b89d78SCam Macdonell                     new_block->idstr);
250884b89d78SCam Macdonell             abort();
250984b89d78SCam Macdonell         }
251084b89d78SCam Macdonell     }
2511c5705a77SAvi Kivity }
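
/*
 * Minimal usage sketch (assumption, not original code): a RAM block is
 * allocated first and then named after the owning device.  The "vga.vram"
 * name and the size are made-up illustrations.
 */
#if 0
static void example_name_vram(DeviceState *dev, MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(8 * 1024 * 1024, mr);

    /* The idstr becomes "<qdev path>/vga.vram" and must be unique;
       a duplicate aborts, as enforced above.  */
    qemu_ram_set_idstr(offset, "vga.vram", dev);
}
#endif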
2512c5705a77SAvi Kivity 
25138490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
25148490fc78SLuiz Capitulino {
25158490fc78SLuiz Capitulino     QemuOpts *opts;
25168490fc78SLuiz Capitulino 
25178490fc78SLuiz Capitulino     opts = qemu_opts_find(qemu_find_opts("machine"), 0);
25188490fc78SLuiz Capitulino     if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
25198490fc78SLuiz Capitulino         /* disabled by the user */
25208490fc78SLuiz Capitulino         return 0;
25218490fc78SLuiz Capitulino     }
25228490fc78SLuiz Capitulino 
25238490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
25248490fc78SLuiz Capitulino }
25258490fc78SLuiz Capitulino 
2526c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2527c5705a77SAvi Kivity                                    MemoryRegion *mr)
2528c5705a77SAvi Kivity {
2529c5705a77SAvi Kivity     RAMBlock *new_block;
2530c5705a77SAvi Kivity 
2531c5705a77SAvi Kivity     size = TARGET_PAGE_ALIGN(size);
2532c5705a77SAvi Kivity     new_block = g_malloc0(sizeof(*new_block));
253384b89d78SCam Macdonell 
25347c637366SAvi Kivity     new_block->mr = mr;
2535432d268cSJun Nakajima     new_block->offset = find_ram_offset(size);
25366977dfe6SYoshiaki Tamura     if (host) {
253784b89d78SCam Macdonell         new_block->host = host;
2538cd19cfa2SHuang Ying         new_block->flags |= RAM_PREALLOC_MASK;
25396977dfe6SYoshiaki Tamura     } else {
2540c902760fSMarcelo Tosatti         if (mem_path) {
2541c902760fSMarcelo Tosatti #if defined (__linux__) && !defined(TARGET_S390X)
254204b16653SAlex Williamson             new_block->host = file_ram_alloc(new_block, size, mem_path);
2543618a568dSMarcelo Tosatti             if (!new_block->host) {
2544618a568dSMarcelo Tosatti                 new_block->host = qemu_vmalloc(size);
25458490fc78SLuiz Capitulino                 memory_try_enable_merging(new_block->host, size);
2546618a568dSMarcelo Tosatti             }
2547c902760fSMarcelo Tosatti #else
2548c902760fSMarcelo Tosatti             fprintf(stderr, "-mem-path option unsupported\n");
2549c902760fSMarcelo Tosatti             exit(1);
2550c902760fSMarcelo Tosatti #endif
2551c902760fSMarcelo Tosatti         } else {
2552868bb33fSJan Kiszka             if (xen_enabled()) {
2553fce537d4SAvi Kivity                 xen_ram_alloc(new_block->offset, size, mr);
2554fdec9918SChristian Borntraeger             } else if (kvm_enabled()) {
2555fdec9918SChristian Borntraeger                 /* some s390/kvm configurations have special constraints */
2556fdec9918SChristian Borntraeger                 new_block->host = kvm_vmalloc(size);
2557432d268cSJun Nakajima             } else {
255894a6b54fSpbrook                 new_block->host = qemu_vmalloc(size);
2559432d268cSJun Nakajima             }
25608490fc78SLuiz Capitulino             memory_try_enable_merging(new_block->host, size);
2561c902760fSMarcelo Tosatti         }
25626977dfe6SYoshiaki Tamura     }
256394a6b54fSpbrook     new_block->length = size;
256494a6b54fSpbrook 
2565f471a17eSAlex Williamson     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
256694a6b54fSpbrook 
25677267c094SAnthony Liguori     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
256804b16653SAlex Williamson                                        last_ram_offset() >> TARGET_PAGE_BITS);
25695fda043fSIgor Mitsyanko     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
25705fda043fSIgor Mitsyanko            0, size >> TARGET_PAGE_BITS);
25711720aeeeSJuan Quintela     cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
257294a6b54fSpbrook 
2573ddb97f1dSJason Baron     qemu_ram_setup_dump(new_block->host, size);
2574ad0b5321SLuiz Capitulino     qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
2575ddb97f1dSJason Baron 
25766f0437e8SJan Kiszka     if (kvm_enabled())
25776f0437e8SJan Kiszka         kvm_setup_guest_memory(new_block->host, size);
25786f0437e8SJan Kiszka 
257994a6b54fSpbrook     return new_block->offset;
258094a6b54fSpbrook }
2581e9a1ab19Sbellard 
2582c5705a77SAvi Kivity ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
25836977dfe6SYoshiaki Tamura {
2584c5705a77SAvi Kivity     return qemu_ram_alloc_from_ptr(size, NULL, mr);
25856977dfe6SYoshiaki Tamura }
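
/*
 * Illustrative sketch (assumption): registering an externally allocated
 * host buffer (for example, memory shared with another process) instead of
 * letting QEMU allocate it.  Blocks created this way carry
 * RAM_PREALLOC_MASK, so qemu_ram_free()/qemu_ram_remap() leave the mapping
 * alone.
 */
#if 0
static ram_addr_t example_register_shared(void *shm_ptr, ram_addr_t size,
                                          MemoryRegion *mr)
{
    /* size is rounded up to TARGET_PAGE_SIZE internally.  */
    return qemu_ram_alloc_from_ptr(size, shm_ptr, mr);
}
#endif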
25866977dfe6SYoshiaki Tamura 
25871f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
25881f2e98b6SAlex Williamson {
25891f2e98b6SAlex Williamson     RAMBlock *block;
25901f2e98b6SAlex Williamson 
25911f2e98b6SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
25921f2e98b6SAlex Williamson         if (addr == block->offset) {
25931f2e98b6SAlex Williamson             QLIST_REMOVE(block, next);
25947267c094SAnthony Liguori             g_free(block);
25951f2e98b6SAlex Williamson             return;
25961f2e98b6SAlex Williamson         }
25971f2e98b6SAlex Williamson     }
25981f2e98b6SAlex Williamson }
25991f2e98b6SAlex Williamson 
2600c227f099SAnthony Liguori void qemu_ram_free(ram_addr_t addr)
2601e9a1ab19Sbellard {
260204b16653SAlex Williamson     RAMBlock *block;
260304b16653SAlex Williamson 
260404b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
260504b16653SAlex Williamson         if (addr == block->offset) {
260604b16653SAlex Williamson             QLIST_REMOVE(block, next);
2607cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
2608cd19cfa2SHuang Ying                 ;
2609cd19cfa2SHuang Ying             } else if (mem_path) {
261004b16653SAlex Williamson #if defined (__linux__) && !defined(TARGET_S390X)
261104b16653SAlex Williamson                 if (block->fd) {
261204b16653SAlex Williamson                     munmap(block->host, block->length);
261304b16653SAlex Williamson                     close(block->fd);
261404b16653SAlex Williamson                 } else {
261504b16653SAlex Williamson                     qemu_vfree(block->host);
261604b16653SAlex Williamson                 }
2617fd28aa13SJan Kiszka #else
2618fd28aa13SJan Kiszka                 abort();
261904b16653SAlex Williamson #endif
262004b16653SAlex Williamson             } else {
262104b16653SAlex Williamson #if defined(TARGET_S390X) && defined(CONFIG_KVM)
262204b16653SAlex Williamson                 munmap(block->host, block->length);
262304b16653SAlex Williamson #else
2624868bb33fSJan Kiszka                 if (xen_enabled()) {
2625e41d7c69SJan Kiszka                     xen_invalidate_map_cache_entry(block->host);
2626432d268cSJun Nakajima                 } else {
262704b16653SAlex Williamson                     qemu_vfree(block->host);
2628432d268cSJun Nakajima                 }
262904b16653SAlex Williamson #endif
263004b16653SAlex Williamson             }
26317267c094SAnthony Liguori             g_free(block);
263204b16653SAlex Williamson             return;
263304b16653SAlex Williamson         }
263404b16653SAlex Williamson     }
263504b16653SAlex Williamson 
2636e9a1ab19Sbellard }
2637e9a1ab19Sbellard 
2638cd19cfa2SHuang Ying #ifndef _WIN32
2639cd19cfa2SHuang Ying void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2640cd19cfa2SHuang Ying {
2641cd19cfa2SHuang Ying     RAMBlock *block;
2642cd19cfa2SHuang Ying     ram_addr_t offset;
2643cd19cfa2SHuang Ying     int flags;
2644cd19cfa2SHuang Ying     void *area, *vaddr;
2645cd19cfa2SHuang Ying 
2646cd19cfa2SHuang Ying     QLIST_FOREACH(block, &ram_list.blocks, next) {
2647cd19cfa2SHuang Ying         offset = addr - block->offset;
2648cd19cfa2SHuang Ying         if (offset < block->length) {
2649cd19cfa2SHuang Ying             vaddr = block->host + offset;
2650cd19cfa2SHuang Ying             if (block->flags & RAM_PREALLOC_MASK) {
2651cd19cfa2SHuang Ying                 ;
2652cd19cfa2SHuang Ying             } else {
2653cd19cfa2SHuang Ying                 flags = MAP_FIXED;
2654cd19cfa2SHuang Ying                 munmap(vaddr, length);
2655cd19cfa2SHuang Ying                 if (mem_path) {
2656cd19cfa2SHuang Ying #if defined(__linux__) && !defined(TARGET_S390X)
2657cd19cfa2SHuang Ying                     if (block->fd) {
2658cd19cfa2SHuang Ying #ifdef MAP_POPULATE
2659cd19cfa2SHuang Ying                         flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2660cd19cfa2SHuang Ying                             MAP_PRIVATE;
2661cd19cfa2SHuang Ying #else
2662cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE;
2663cd19cfa2SHuang Ying #endif
2664cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2665cd19cfa2SHuang Ying                                     flags, block->fd, offset);
2666cd19cfa2SHuang Ying                     } else {
2667cd19cfa2SHuang Ying                         flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2668cd19cfa2SHuang Ying                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2669cd19cfa2SHuang Ying                                     flags, -1, 0);
2670cd19cfa2SHuang Ying                     }
2671fd28aa13SJan Kiszka #else
2672fd28aa13SJan Kiszka                     abort();
2673cd19cfa2SHuang Ying #endif
2674cd19cfa2SHuang Ying                 } else {
2675cd19cfa2SHuang Ying #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2676cd19cfa2SHuang Ying                     flags |= MAP_SHARED | MAP_ANONYMOUS;
2677cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2678cd19cfa2SHuang Ying                                 flags, -1, 0);
2679cd19cfa2SHuang Ying #else
2680cd19cfa2SHuang Ying                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2681cd19cfa2SHuang Ying                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2682cd19cfa2SHuang Ying                                 flags, -1, 0);
2683cd19cfa2SHuang Ying #endif
2684cd19cfa2SHuang Ying                 }
2685cd19cfa2SHuang Ying                 if (area != vaddr) {
2686f15fbc4bSAnthony PERARD                     fprintf(stderr, "Could not remap addr: "
2687f15fbc4bSAnthony PERARD                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2688cd19cfa2SHuang Ying                             length, addr);
2689cd19cfa2SHuang Ying                     exit(1);
2690cd19cfa2SHuang Ying                 }
26918490fc78SLuiz Capitulino                 memory_try_enable_merging(vaddr, length);
2692ddb97f1dSJason Baron                 qemu_ram_setup_dump(vaddr, length);
2693cd19cfa2SHuang Ying             }
2694cd19cfa2SHuang Ying             return;
2695cd19cfa2SHuang Ying         }
2696cd19cfa2SHuang Ying     }
2697cd19cfa2SHuang Ying }
2698cd19cfa2SHuang Ying #endif /* !_WIN32 */
2699cd19cfa2SHuang Ying 
2700dc828ca1Spbrook /* Return a host pointer to ram allocated with qemu_ram_alloc.
27015579c7f3Spbrook    With the exception of the softmmu code in this file, this should
27025579c7f3Spbrook    only be used for local memory (e.g. video ram) that the device owns,
27035579c7f3Spbrook    and knows it isn't going to access beyond the end of the block.
27045579c7f3Spbrook 
27055579c7f3Spbrook    It should not be used for general purpose DMA.
27065579c7f3Spbrook    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
27075579c7f3Spbrook  */
2708c227f099SAnthony Liguori void *qemu_get_ram_ptr(ram_addr_t addr)
2709dc828ca1Spbrook {
271094a6b54fSpbrook     RAMBlock *block;
271194a6b54fSpbrook 
2712f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2713f471a17eSAlex Williamson         if (addr - block->offset < block->length) {
27147d82af38SVincent Palatin             /* Move this entry to the start of the list.  */
27157d82af38SVincent Palatin             if (block != QLIST_FIRST(&ram_list.blocks)) {
2716f471a17eSAlex Williamson                 QLIST_REMOVE(block, next);
2717f471a17eSAlex Williamson                 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
27187d82af38SVincent Palatin             }
2719868bb33fSJan Kiszka             if (xen_enabled()) {
2720432d268cSJun Nakajima                 /* We need to check whether the requested address is in RAM,
2721432d268cSJun Nakajima                  * because we don't want to map all of guest memory in QEMU.
2722712c2b41SStefano Stabellini                  * In that case, just map up to the end of the page.
2723432d268cSJun Nakajima                  */
2724432d268cSJun Nakajima                 if (block->offset == 0) {
2725e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
2726432d268cSJun Nakajima                 } else if (block->host == NULL) {
2727e41d7c69SJan Kiszka                     block->host =
2728e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
2729432d268cSJun Nakajima                 }
2730432d268cSJun Nakajima             }
2731f471a17eSAlex Williamson             return block->host + (addr - block->offset);
273294a6b54fSpbrook         }
2733f471a17eSAlex Williamson     }
2734f471a17eSAlex Williamson 
273594a6b54fSpbrook     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
273694a6b54fSpbrook     abort();
2737f471a17eSAlex Williamson 
2738f471a17eSAlex Williamson     return NULL;
2739dc828ca1Spbrook }
2740dc828ca1Spbrook 
2741b2e0a138SMichael S. Tsirkin /* Return a host pointer to ram allocated with qemu_ram_alloc.
2742b2e0a138SMichael S. Tsirkin  * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2743b2e0a138SMichael S. Tsirkin  */
2744b2e0a138SMichael S. Tsirkin void *qemu_safe_ram_ptr(ram_addr_t addr)
2745b2e0a138SMichael S. Tsirkin {
2746b2e0a138SMichael S. Tsirkin     RAMBlock *block;
2747b2e0a138SMichael S. Tsirkin 
2748b2e0a138SMichael S. Tsirkin     QLIST_FOREACH(block, &ram_list.blocks, next) {
2749b2e0a138SMichael S. Tsirkin         if (addr - block->offset < block->length) {
2750868bb33fSJan Kiszka             if (xen_enabled()) {
2751432d268cSJun Nakajima                 /* We need to check whether the requested address is in RAM,
2752432d268cSJun Nakajima                  * because we don't want to map all of guest memory in QEMU.
2753712c2b41SStefano Stabellini                  * In that case, just map up to the end of the page.
2754432d268cSJun Nakajima                  */
2755432d268cSJun Nakajima                 if (block->offset == 0) {
2756e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
2757432d268cSJun Nakajima                 } else if (block->host == NULL) {
2758e41d7c69SJan Kiszka                     block->host =
2759e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
2760432d268cSJun Nakajima                 }
2761432d268cSJun Nakajima             }
2762b2e0a138SMichael S. Tsirkin             return block->host + (addr - block->offset);
2763b2e0a138SMichael S. Tsirkin         }
2764b2e0a138SMichael S. Tsirkin     }
2765b2e0a138SMichael S. Tsirkin 
2766b2e0a138SMichael S. Tsirkin     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2767b2e0a138SMichael S. Tsirkin     abort();
2768b2e0a138SMichael S. Tsirkin 
2769b2e0a138SMichael S. Tsirkin     return NULL;
2770b2e0a138SMichael S. Tsirkin }
2771b2e0a138SMichael S. Tsirkin 
277238bee5dcSStefano Stabellini /* Return a host pointer to the guest's RAM.  Similar to qemu_get_ram_ptr
277338bee5dcSStefano Stabellini  * but takes a size argument */
27748ab934f9SStefano Stabellini void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
277538bee5dcSStefano Stabellini {
27768ab934f9SStefano Stabellini     if (*size == 0) {
27778ab934f9SStefano Stabellini         return NULL;
27788ab934f9SStefano Stabellini     }
2779868bb33fSJan Kiszka     if (xen_enabled()) {
2780e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
2781868bb33fSJan Kiszka     } else {
278238bee5dcSStefano Stabellini         RAMBlock *block;
278338bee5dcSStefano Stabellini 
278438bee5dcSStefano Stabellini         QLIST_FOREACH(block, &ram_list.blocks, next) {
278538bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
278638bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
278738bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
278838bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
278938bee5dcSStefano Stabellini             }
279038bee5dcSStefano Stabellini         }
279138bee5dcSStefano Stabellini 
279238bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
279338bee5dcSStefano Stabellini         abort();
279438bee5dcSStefano Stabellini     }
279538bee5dcSStefano Stabellini }
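
/*
 * Usage sketch (assumption, not original code): *size is an in/out
 * parameter, so callers must re-check it after the call.
 */
#if 0
static void example_bounded_map(ram_addr_t addr, ram_addr_t want)
{
    ram_addr_t size = want;
    void *p = qemu_ram_ptr_length(addr, &size);

    if (p && size < want) {
        /* The mapping was clamped at a block boundary; map the remainder
           starting from addr + size in a second call.  */
    }
}
#endif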
279638bee5dcSStefano Stabellini 
2797050a0ddfSAnthony PERARD void qemu_put_ram_ptr(void *addr)
2798050a0ddfSAnthony PERARD {
2799050a0ddfSAnthony PERARD     trace_qemu_put_ram_ptr(addr);
2800050a0ddfSAnthony PERARD }
2801050a0ddfSAnthony PERARD 
2802e890261fSMarcelo Tosatti int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
28035579c7f3Spbrook {
280494a6b54fSpbrook     RAMBlock *block;
280594a6b54fSpbrook     uint8_t *host = ptr;
280694a6b54fSpbrook 
2807868bb33fSJan Kiszka     if (xen_enabled()) {
2808e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
2809712c2b41SStefano Stabellini         return 0;
2810712c2b41SStefano Stabellini     }
2811712c2b41SStefano Stabellini 
2812f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2813432d268cSJun Nakajima         /* This case happens when the block is not mapped. */
2814432d268cSJun Nakajima         if (block->host == NULL) {
2815432d268cSJun Nakajima             continue;
2816432d268cSJun Nakajima         }
2817f471a17eSAlex Williamson         if (host - block->host < block->length) {
2818e890261fSMarcelo Tosatti             *ram_addr = block->offset + (host - block->host);
2819e890261fSMarcelo Tosatti             return 0;
282094a6b54fSpbrook         }
2821f471a17eSAlex Williamson     }
2822432d268cSJun Nakajima 
2823e890261fSMarcelo Tosatti     return -1;
2824e890261fSMarcelo Tosatti }
2825f471a17eSAlex Williamson 
2826e890261fSMarcelo Tosatti /* Some of the softmmu routines need to translate from a host pointer
2827e890261fSMarcelo Tosatti    (typically a TLB entry) back to a ram offset.  */
2828e890261fSMarcelo Tosatti ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2829e890261fSMarcelo Tosatti {
2830e890261fSMarcelo Tosatti     ram_addr_t ram_addr;
2831e890261fSMarcelo Tosatti 
2832e890261fSMarcelo Tosatti     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
283394a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
283494a6b54fSpbrook         abort();
2835e890261fSMarcelo Tosatti     }
2836e890261fSMarcelo Tosatti     return ram_addr;
28375579c7f3Spbrook }
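
/*
 * Round-trip sketch (illustrative, not original code): a host pointer
 * obtained from qemu_get_ram_ptr() maps back to the same ram_addr_t,
 * which is exactly what the softmmu slow path relies on for TLB entry
 * pointers.  assert() is assumed to be available here.
 */
#if 0
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif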
28385579c7f3Spbrook 
28390e0df1e2SAvi Kivity static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
28400e0df1e2SAvi Kivity                                     unsigned size)
284133417e70Sbellard {
284267d3b957Spbrook #ifdef DEBUG_UNASSIGNED
2843ab3d1727Sblueswir1     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
284467d3b957Spbrook #endif
28455b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
28460e0df1e2SAvi Kivity     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
2847e18231a3Sblueswir1 #endif
2848e18231a3Sblueswir1     return 0;
2849e18231a3Sblueswir1 }
2850e18231a3Sblueswir1 
28510e0df1e2SAvi Kivity static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
28520e0df1e2SAvi Kivity                                  uint64_t val, unsigned size)
2853e18231a3Sblueswir1 {
2854e18231a3Sblueswir1 #ifdef DEBUG_UNASSIGNED
28550e0df1e2SAvi Kivity     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
2856e18231a3Sblueswir1 #endif
28575b450407SRichard Henderson #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
28580e0df1e2SAvi Kivity     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
2859e18231a3Sblueswir1 #endif
2860e18231a3Sblueswir1 }
2861e18231a3Sblueswir1 
28620e0df1e2SAvi Kivity static const MemoryRegionOps unassigned_mem_ops = {
28630e0df1e2SAvi Kivity     .read = unassigned_mem_read,
28640e0df1e2SAvi Kivity     .write = unassigned_mem_write,
28650e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
286633417e70Sbellard };
286733417e70Sbellard 
28680e0df1e2SAvi Kivity static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
28690e0df1e2SAvi Kivity                                unsigned size)
28700e0df1e2SAvi Kivity {
28710e0df1e2SAvi Kivity     abort();
28720e0df1e2SAvi Kivity }
28730e0df1e2SAvi Kivity 
28740e0df1e2SAvi Kivity static void error_mem_write(void *opaque, target_phys_addr_t addr,
28750e0df1e2SAvi Kivity                             uint64_t value, unsigned size)
28760e0df1e2SAvi Kivity {
28770e0df1e2SAvi Kivity     abort();
28780e0df1e2SAvi Kivity }
28790e0df1e2SAvi Kivity 
28800e0df1e2SAvi Kivity static const MemoryRegionOps error_mem_ops = {
28810e0df1e2SAvi Kivity     .read = error_mem_read,
28820e0df1e2SAvi Kivity     .write = error_mem_write,
28830e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
288433417e70Sbellard };
288533417e70Sbellard 
28860e0df1e2SAvi Kivity static const MemoryRegionOps rom_mem_ops = {
28870e0df1e2SAvi Kivity     .read = error_mem_read,
28880e0df1e2SAvi Kivity     .write = unassigned_mem_write,
28890e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
28900e0df1e2SAvi Kivity };
28910e0df1e2SAvi Kivity 
28920e0df1e2SAvi Kivity static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
28930e0df1e2SAvi Kivity                                uint64_t val, unsigned size)
28941ccde1cbSbellard {
28953a7d929eSbellard     int dirty_flags;
2896f7c11b53SYoshiaki Tamura     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
28973a7d929eSbellard     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
28983a7d929eSbellard #if !defined(CONFIG_USER_ONLY)
28990e0df1e2SAvi Kivity         tb_invalidate_phys_page_fast(ram_addr, size);
2900f7c11b53SYoshiaki Tamura         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
29013a7d929eSbellard #endif
29023a7d929eSbellard     }
29030e0df1e2SAvi Kivity     switch (size) {
29040e0df1e2SAvi Kivity     case 1:
29055579c7f3Spbrook         stb_p(qemu_get_ram_ptr(ram_addr), val);
29060e0df1e2SAvi Kivity         break;
29070e0df1e2SAvi Kivity     case 2:
29085579c7f3Spbrook         stw_p(qemu_get_ram_ptr(ram_addr), val);
29090e0df1e2SAvi Kivity         break;
29100e0df1e2SAvi Kivity     case 4:
29115579c7f3Spbrook         stl_p(qemu_get_ram_ptr(ram_addr), val);
29120e0df1e2SAvi Kivity         break;
29130e0df1e2SAvi Kivity     default:
29140e0df1e2SAvi Kivity         abort();
29150e0df1e2SAvi Kivity     }
2916f23db169Sbellard     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2917f7c11b53SYoshiaki Tamura     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2918f23db169Sbellard     /* we remove the notdirty callback only if the code has been
2919f23db169Sbellard        flushed */
2920f23db169Sbellard     if (dirty_flags == 0xff)
29212e70f6efSpbrook         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
29221ccde1cbSbellard }
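
/* In other words: the store above forces every dirty bit except
   CODE_DIRTY_FLAG on, so dirty_flags only reads back as 0xff when
   CODE_DIRTY_FLAG was already set, i.e. when no translated code remains
   on the page.  Only then is it safe to drop the slow notdirty handler
   and let the TLB map the page as ordinary RAM again.  */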
29231ccde1cbSbellard 
29240e0df1e2SAvi Kivity static const MemoryRegionOps notdirty_mem_ops = {
29250e0df1e2SAvi Kivity     .read = error_mem_read,
29260e0df1e2SAvi Kivity     .write = notdirty_mem_write,
29270e0df1e2SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
29281ccde1cbSbellard };
29291ccde1cbSbellard 
29300f459d16Spbrook /* Generate a debug exception if a watchpoint has been hit.  */
2931b4051334Saliguori static void check_watchpoint(int offset, int len_mask, int flags)
29320f459d16Spbrook {
29339349b4f9SAndreas Färber     CPUArchState *env = cpu_single_env;
293406d55cc1Saliguori     target_ulong pc, cs_base;
293506d55cc1Saliguori     TranslationBlock *tb;
29360f459d16Spbrook     target_ulong vaddr;
2937a1d1bb31Saliguori     CPUWatchpoint *wp;
293806d55cc1Saliguori     int cpu_flags;
29390f459d16Spbrook 
294006d55cc1Saliguori     if (env->watchpoint_hit) {
294106d55cc1Saliguori         /* We re-entered the check after replacing the TB. Now raise
294206d55cc1Saliguori          * the debug interrupt so that is will trigger after the
294306d55cc1Saliguori          * current instruction. */
294406d55cc1Saliguori         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
294506d55cc1Saliguori         return;
294606d55cc1Saliguori     }
29472e70f6efSpbrook     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
294872cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2949b4051334Saliguori         if ((vaddr == (wp->vaddr & len_mask) ||
2950b4051334Saliguori              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
29516e140f28Saliguori             wp->flags |= BP_WATCHPOINT_HIT;
29526e140f28Saliguori             if (!env->watchpoint_hit) {
2953a1d1bb31Saliguori                 env->watchpoint_hit = wp;
295406d55cc1Saliguori                 tb = tb_find_pc(env->mem_io_pc);
295506d55cc1Saliguori                 if (!tb) {
29566e140f28Saliguori                     cpu_abort(env, "check_watchpoint: could not find TB for "
29576e140f28Saliguori                               "pc=%p", (void *)env->mem_io_pc);
295806d55cc1Saliguori                 }
2959618ba8e6SStefan Weil                 cpu_restore_state(tb, env, env->mem_io_pc);
296006d55cc1Saliguori                 tb_phys_invalidate(tb, -1);
296106d55cc1Saliguori                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
296206d55cc1Saliguori                     env->exception_index = EXCP_DEBUG;
2963488d6577SMax Filippov                     cpu_loop_exit(env);
296406d55cc1Saliguori                 } else {
296506d55cc1Saliguori                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
296606d55cc1Saliguori                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
296706d55cc1Saliguori                     cpu_resume_from_signal(env, NULL);
29680f459d16Spbrook                 }
2969488d6577SMax Filippov             }
29706e140f28Saliguori         } else {
29716e140f28Saliguori             wp->flags &= ~BP_WATCHPOINT_HIT;
29726e140f28Saliguori         }
29730f459d16Spbrook     }
29740f459d16Spbrook }
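
/*
 * Illustrative sketch (assumption, not original code): how the len_mask
 * argument above is formed.  A watchpoint on a naturally aligned 2^n-byte
 * region stores len_mask = ~(len - 1), so the 4-byte read case of
 * watch_mem_read() below is equivalent to:
 */
#if 0
static uint64_t example_watch_read_4(target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~3, BP_MEM_READ);
    return ldl_phys(addr);
}
#endif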
29750f459d16Spbrook 
29766658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
29776658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
29786658ffb8Spbrook    phys routines.  */
29791ec9b909SAvi Kivity static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
29801ec9b909SAvi Kivity                                unsigned size)
29816658ffb8Spbrook {
29821ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
29831ec9b909SAvi Kivity     switch (size) {
29841ec9b909SAvi Kivity     case 1: return ldub_phys(addr);
29851ec9b909SAvi Kivity     case 2: return lduw_phys(addr);
29861ec9b909SAvi Kivity     case 4: return ldl_phys(addr);
29871ec9b909SAvi Kivity     default: abort();
29881ec9b909SAvi Kivity     }
29896658ffb8Spbrook }
29906658ffb8Spbrook 
29911ec9b909SAvi Kivity static void watch_mem_write(void *opaque, target_phys_addr_t addr,
29921ec9b909SAvi Kivity                             uint64_t val, unsigned size)
29936658ffb8Spbrook {
29941ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
29951ec9b909SAvi Kivity     switch (size) {
299667364150SMax Filippov     case 1:
299767364150SMax Filippov         stb_phys(addr, val);
299867364150SMax Filippov         break;
299967364150SMax Filippov     case 2:
300067364150SMax Filippov         stw_phys(addr, val);
300167364150SMax Filippov         break;
300267364150SMax Filippov     case 4:
300367364150SMax Filippov         stl_phys(addr, val);
300467364150SMax Filippov         break;
30051ec9b909SAvi Kivity     default: abort();
30061ec9b909SAvi Kivity     }
30076658ffb8Spbrook }
30086658ffb8Spbrook 
30091ec9b909SAvi Kivity static const MemoryRegionOps watch_mem_ops = {
30101ec9b909SAvi Kivity     .read = watch_mem_read,
30111ec9b909SAvi Kivity     .write = watch_mem_write,
30121ec9b909SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
30136658ffb8Spbrook };
30146658ffb8Spbrook 
301570c68e44SAvi Kivity static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
301670c68e44SAvi Kivity                              unsigned len)
3017db7b5426Sblueswir1 {
301870c68e44SAvi Kivity     subpage_t *mmio = opaque;
3019f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
30205312bd8bSAvi Kivity     MemoryRegionSection *section;
3021db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3022db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3023db7b5426Sblueswir1            mmio, len, addr, idx);
3024db7b5426Sblueswir1 #endif
3025db7b5426Sblueswir1 
30265312bd8bSAvi Kivity     section = &phys_sections[mmio->sub_section[idx]];
30275312bd8bSAvi Kivity     addr += mmio->base;
30285312bd8bSAvi Kivity     addr -= section->offset_within_address_space;
30295312bd8bSAvi Kivity     addr += section->offset_within_region;
303037ec01d4SAvi Kivity     return io_mem_read(section->mr, addr, len);
3031db7b5426Sblueswir1 }
3032db7b5426Sblueswir1 
303370c68e44SAvi Kivity static void subpage_write(void *opaque, target_phys_addr_t addr,
303470c68e44SAvi Kivity                           uint64_t value, unsigned len)
3035db7b5426Sblueswir1 {
303670c68e44SAvi Kivity     subpage_t *mmio = opaque;
3037f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
30385312bd8bSAvi Kivity     MemoryRegionSection *section;
3039db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
304070c68e44SAvi Kivity     printf("%s: subpage %p len %d addr " TARGET_FMT_plx
304170c68e44SAvi Kivity            " idx %d value %"PRIx64"\n",
3042f6405247SRichard Henderson            __func__, mmio, len, addr, idx, value);
3043db7b5426Sblueswir1 #endif
3044f6405247SRichard Henderson 
30455312bd8bSAvi Kivity     section = &phys_sections[mmio->sub_section[idx]];
30465312bd8bSAvi Kivity     addr += mmio->base;
30475312bd8bSAvi Kivity     addr -= section->offset_within_address_space;
30485312bd8bSAvi Kivity     addr += section->offset_within_region;
304937ec01d4SAvi Kivity     io_mem_write(section->mr, addr, value, len);
3050db7b5426Sblueswir1 }
3051db7b5426Sblueswir1 
305270c68e44SAvi Kivity static const MemoryRegionOps subpage_ops = {
305370c68e44SAvi Kivity     .read = subpage_read,
305470c68e44SAvi Kivity     .write = subpage_write,
305570c68e44SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
3056db7b5426Sblueswir1 };
3057db7b5426Sblueswir1 
3058de712f94SAvi Kivity static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3059de712f94SAvi Kivity                                  unsigned size)
306056384e8bSAndreas Färber {
306156384e8bSAndreas Färber     ram_addr_t raddr = addr;
306256384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
3063de712f94SAvi Kivity     switch (size) {
3064de712f94SAvi Kivity     case 1: return ldub_p(ptr);
3065de712f94SAvi Kivity     case 2: return lduw_p(ptr);
3066de712f94SAvi Kivity     case 4: return ldl_p(ptr);
3067de712f94SAvi Kivity     default: abort();
3068de712f94SAvi Kivity     }
306956384e8bSAndreas Färber }
307056384e8bSAndreas Färber 
3071de712f94SAvi Kivity static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3072de712f94SAvi Kivity                               uint64_t value, unsigned size)
307356384e8bSAndreas Färber {
307456384e8bSAndreas Färber     ram_addr_t raddr = addr;
307556384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
3076de712f94SAvi Kivity     switch (size) {
3077de712f94SAvi Kivity     case 1: return stb_p(ptr, value);
3078de712f94SAvi Kivity     case 2: return stw_p(ptr, value);
3079de712f94SAvi Kivity     case 4: return stl_p(ptr, value);
3080de712f94SAvi Kivity     default: abort();
3081de712f94SAvi Kivity     }
308256384e8bSAndreas Färber }
308356384e8bSAndreas Färber 
3084de712f94SAvi Kivity static const MemoryRegionOps subpage_ram_ops = {
3085de712f94SAvi Kivity     .read = subpage_ram_read,
3086de712f94SAvi Kivity     .write = subpage_ram_write,
3087de712f94SAvi Kivity     .endianness = DEVICE_NATIVE_ENDIAN,
308856384e8bSAndreas Färber };
308956384e8bSAndreas Färber 
3090c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
30915312bd8bSAvi Kivity                              uint16_t section)
3092db7b5426Sblueswir1 {
3093db7b5426Sblueswir1     int idx, eidx;
3094db7b5426Sblueswir1 
3095db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3096db7b5426Sblueswir1         return -1;
3097db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
3098db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
3099db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
31000bf9e31aSBlue Swirl     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
3101db7b5426Sblueswir1            mmio, start, end, idx, eidx, section);
3102db7b5426Sblueswir1 #endif
31035312bd8bSAvi Kivity     if (memory_region_is_ram(phys_sections[section].mr)) {
31045312bd8bSAvi Kivity         MemoryRegionSection new_section = phys_sections[section];
31055312bd8bSAvi Kivity         new_section.mr = &io_mem_subpage_ram;
31065312bd8bSAvi Kivity         section = phys_section_add(&new_section);
310756384e8bSAndreas Färber     }
3108db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
31095312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
3110db7b5426Sblueswir1     }
3111db7b5426Sblueswir1 
3112db7b5426Sblueswir1     return 0;
3113db7b5426Sblueswir1 }
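
/*
 * Usage sketch (illustrative): register_subpage() above drives this in
 * practice; conceptually, carving the first 0x100 bytes of a page out for
 * a device section looks like the following.  'section' must be an index
 * returned by phys_section_add().
 */
#if 0
static void example_carve_subpage(subpage_t *mmio, uint16_t section)
{
    /* Page offsets [0x000, 0x0ff] now dispatch to 'section'; the rest of
       the page keeps whatever was registered before.  */
    subpage_register(mmio, 0x000, 0x0ff, section);
}
#endif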
3114db7b5426Sblueswir1 
31150f0cb164SAvi Kivity static subpage_t *subpage_init(target_phys_addr_t base)
3116db7b5426Sblueswir1 {
3117c227f099SAnthony Liguori     subpage_t *mmio;
3118db7b5426Sblueswir1 
31197267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
31201eec614bSaliguori 
3121db7b5426Sblueswir1     mmio->base = base;
312270c68e44SAvi Kivity     memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
312370c68e44SAvi Kivity                           "subpage", TARGET_PAGE_SIZE);
3124b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
3125db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3126db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3127db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE);
3128db7b5426Sblueswir1 #endif
31290f0cb164SAvi Kivity     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
3130db7b5426Sblueswir1 
3131db7b5426Sblueswir1     return mmio;
3132db7b5426Sblueswir1 }
3133db7b5426Sblueswir1 
31345312bd8bSAvi Kivity static uint16_t dummy_section(MemoryRegion *mr)
31355312bd8bSAvi Kivity {
31365312bd8bSAvi Kivity     MemoryRegionSection section = {
31375312bd8bSAvi Kivity         .mr = mr,
31385312bd8bSAvi Kivity         .offset_within_address_space = 0,
31395312bd8bSAvi Kivity         .offset_within_region = 0,
31405312bd8bSAvi Kivity         .size = UINT64_MAX,
31415312bd8bSAvi Kivity     };
31425312bd8bSAvi Kivity 
31435312bd8bSAvi Kivity     return phys_section_add(&section);
31445312bd8bSAvi Kivity }
31455312bd8bSAvi Kivity 
314637ec01d4SAvi Kivity MemoryRegion *iotlb_to_region(target_phys_addr_t index)
3147aa102231SAvi Kivity {
314837ec01d4SAvi Kivity     return phys_sections[index & ~TARGET_PAGE_MASK].mr;
3149aa102231SAvi Kivity }
3150aa102231SAvi Kivity 
3151e9179ce1SAvi Kivity static void io_mem_init(void)
3152e9179ce1SAvi Kivity {
31530e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
31540e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
31550e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
31560e0df1e2SAvi Kivity                           "unassigned", UINT64_MAX);
31570e0df1e2SAvi Kivity     memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
31580e0df1e2SAvi Kivity                           "notdirty", UINT64_MAX);
3159de712f94SAvi Kivity     memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3160de712f94SAvi Kivity                           "subpage-ram", UINT64_MAX);
31611ec9b909SAvi Kivity     memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
31621ec9b909SAvi Kivity                           "watch", UINT64_MAX);
3163e9179ce1SAvi Kivity }
3164e9179ce1SAvi Kivity 
3165ac1970fbSAvi Kivity static void mem_begin(MemoryListener *listener)
3166ac1970fbSAvi Kivity {
3167ac1970fbSAvi Kivity     AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
3168ac1970fbSAvi Kivity 
3169ac1970fbSAvi Kivity     destroy_all_mappings(d);
3170ac1970fbSAvi Kivity     d->phys_map.ptr = PHYS_MAP_NODE_NIL;
3171ac1970fbSAvi Kivity }
3172ac1970fbSAvi Kivity 
317350c1e149SAvi Kivity static void core_begin(MemoryListener *listener)
317450c1e149SAvi Kivity {
31755312bd8bSAvi Kivity     phys_sections_clear();
31765312bd8bSAvi Kivity     phys_section_unassigned = dummy_section(&io_mem_unassigned);
3177aa102231SAvi Kivity     phys_section_notdirty = dummy_section(&io_mem_notdirty);
3178aa102231SAvi Kivity     phys_section_rom = dummy_section(&io_mem_rom);
3179aa102231SAvi Kivity     phys_section_watch = dummy_section(&io_mem_watch);
318050c1e149SAvi Kivity }
318150c1e149SAvi Kivity 
31821d71148eSAvi Kivity static void tcg_commit(MemoryListener *listener)
318350c1e149SAvi Kivity {
31849349b4f9SAndreas Färber     CPUArchState *env;
3185117712c3SAvi Kivity 
3186117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
3187117712c3SAvi Kivity        reset the modified entries */
3188117712c3SAvi Kivity     /* XXX: slow ! */
3189117712c3SAvi Kivity     for (env = first_cpu; env != NULL; env = env->next_cpu) {
3190117712c3SAvi Kivity         tlb_flush(env, 1);
3191117712c3SAvi Kivity     }
319250c1e149SAvi Kivity }
319350c1e149SAvi Kivity 
319493632747SAvi Kivity static void core_log_global_start(MemoryListener *listener)
319593632747SAvi Kivity {
319693632747SAvi Kivity     cpu_physical_memory_set_dirty_tracking(1);
319793632747SAvi Kivity }
319893632747SAvi Kivity 
319993632747SAvi Kivity static void core_log_global_stop(MemoryListener *listener)
320093632747SAvi Kivity {
320193632747SAvi Kivity     cpu_physical_memory_set_dirty_tracking(0);
320293632747SAvi Kivity }
320393632747SAvi Kivity 
32044855d41aSAvi Kivity static void io_region_add(MemoryListener *listener,
32054855d41aSAvi Kivity                           MemoryRegionSection *section)
32064855d41aSAvi Kivity {
3207a2d33521SAvi Kivity     MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3208a2d33521SAvi Kivity 
3209a2d33521SAvi Kivity     mrio->mr = section->mr;
3210a2d33521SAvi Kivity     mrio->offset = section->offset_within_region;
3211a2d33521SAvi Kivity     iorange_init(&mrio->iorange, &memory_region_iorange_ops,
32124855d41aSAvi Kivity                  section->offset_within_address_space, section->size);
3213a2d33521SAvi Kivity     ioport_register(&mrio->iorange);
32144855d41aSAvi Kivity }
32154855d41aSAvi Kivity 
32164855d41aSAvi Kivity static void io_region_del(MemoryListener *listener,
32174855d41aSAvi Kivity                           MemoryRegionSection *section)
32184855d41aSAvi Kivity {
32194855d41aSAvi Kivity     isa_unassign_ioport(section->offset_within_address_space, section->size);
32204855d41aSAvi Kivity }
32214855d41aSAvi Kivity 
322293632747SAvi Kivity static MemoryListener core_memory_listener = {
322350c1e149SAvi Kivity     .begin = core_begin,
322493632747SAvi Kivity     .log_global_start = core_log_global_start,
322593632747SAvi Kivity     .log_global_stop = core_log_global_stop,
3226ac1970fbSAvi Kivity     .priority = 1,
322793632747SAvi Kivity };
322893632747SAvi Kivity 
32294855d41aSAvi Kivity static MemoryListener io_memory_listener = {
32304855d41aSAvi Kivity     .region_add = io_region_add,
32314855d41aSAvi Kivity     .region_del = io_region_del,
32324855d41aSAvi Kivity     .priority = 0,
32334855d41aSAvi Kivity };
32344855d41aSAvi Kivity 
32351d71148eSAvi Kivity static MemoryListener tcg_memory_listener = {
32361d71148eSAvi Kivity     .commit = tcg_commit,
32371d71148eSAvi Kivity };
32381d71148eSAvi Kivity 
3239ac1970fbSAvi Kivity void address_space_init_dispatch(AddressSpace *as)
3240ac1970fbSAvi Kivity {
3241ac1970fbSAvi Kivity     AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
3242ac1970fbSAvi Kivity 
3243ac1970fbSAvi Kivity     d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
3244ac1970fbSAvi Kivity     d->listener = (MemoryListener) {
3245ac1970fbSAvi Kivity         .begin = mem_begin,
3246ac1970fbSAvi Kivity         .region_add = mem_add,
3247ac1970fbSAvi Kivity         .region_nop = mem_add,
3248ac1970fbSAvi Kivity         .priority = 0,
3249ac1970fbSAvi Kivity     };
3250ac1970fbSAvi Kivity     as->dispatch = d;
3251ac1970fbSAvi Kivity     memory_listener_register(&d->listener, as);
3252ac1970fbSAvi Kivity }
3253ac1970fbSAvi Kivity 
325483f3c251SAvi Kivity void address_space_destroy_dispatch(AddressSpace *as)
325583f3c251SAvi Kivity {
325683f3c251SAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
325783f3c251SAvi Kivity 
325883f3c251SAvi Kivity     memory_listener_unregister(&d->listener);
325983f3c251SAvi Kivity     destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
326083f3c251SAvi Kivity     g_free(d);
326183f3c251SAvi Kivity     as->dispatch = NULL;
326283f3c251SAvi Kivity }
326383f3c251SAvi Kivity 
326462152b8aSAvi Kivity static void memory_map_init(void)
326562152b8aSAvi Kivity {
32667267c094SAnthony Liguori     system_memory = g_malloc(sizeof(*system_memory));
32678417cebfSAvi Kivity     memory_region_init(system_memory, "system", INT64_MAX);
32682673a5daSAvi Kivity     address_space_init(&address_space_memory, system_memory);
32692673a5daSAvi Kivity     address_space_memory.name = "memory";
3270309cb471SAvi Kivity 
32717267c094SAnthony Liguori     system_io = g_malloc(sizeof(*system_io));
3272309cb471SAvi Kivity     memory_region_init(system_io, "io", 65536);
32732673a5daSAvi Kivity     address_space_init(&address_space_io, system_io);
32742673a5daSAvi Kivity     address_space_io.name = "I/O";
327593632747SAvi Kivity 
3276f6790af6SAvi Kivity     memory_listener_register(&core_memory_listener, &address_space_memory);
3277f6790af6SAvi Kivity     memory_listener_register(&io_memory_listener, &address_space_io);
3278f6790af6SAvi Kivity     memory_listener_register(&tcg_memory_listener, &address_space_memory);
327962152b8aSAvi Kivity }
328062152b8aSAvi Kivity 
328162152b8aSAvi Kivity MemoryRegion *get_system_memory(void)
328262152b8aSAvi Kivity {
328362152b8aSAvi Kivity     return system_memory;
328462152b8aSAvi Kivity }
328562152b8aSAvi Kivity 
3286309cb471SAvi Kivity MemoryRegion *get_system_io(void)
3287309cb471SAvi Kivity {
3288309cb471SAvi Kivity     return system_io;
3289309cb471SAvi Kivity }
3290309cb471SAvi Kivity 
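/*
 * Board code consumes get_system_memory() as the root of the guest
 * physical address space.  A minimal sketch, assuming the era's
 * memory_region_init_ram()/memory_region_add_subregion() signatures;
 * the "myboard" name is illustrative:
 */
#if 0
static void myboard_init_ram(ram_addr_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "myboard.ram", ram_size);
    memory_region_add_subregion(sysmem, 0, ram);    /* map at GPA 0 */
}
#endif
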
3291e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3292e2eef170Spbrook 
329313eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
329413eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
32959349b4f9SAndreas Färber int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
3296a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
329713eb76e0Sbellard {
329813eb76e0Sbellard     int l, flags;
329913eb76e0Sbellard     target_ulong page;
330053a5960aSpbrook     void *p;
330113eb76e0Sbellard 
330213eb76e0Sbellard     while (len > 0) {
330313eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
330413eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
330513eb76e0Sbellard         if (l > len)
330613eb76e0Sbellard             l = len;
330713eb76e0Sbellard         flags = page_get_flags(page);
330813eb76e0Sbellard         if (!(flags & PAGE_VALID))
3309a68fe89cSPaul Brook             return -1;
331013eb76e0Sbellard         if (is_write) {
331113eb76e0Sbellard             if (!(flags & PAGE_WRITE))
3312a68fe89cSPaul Brook                 return -1;
3313579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
331472fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3315a68fe89cSPaul Brook                 return -1;
331672fb7daaSaurel32             memcpy(p, buf, l);
331772fb7daaSaurel32             unlock_user(p, addr, l);
331813eb76e0Sbellard         } else {
331913eb76e0Sbellard             if (!(flags & PAGE_READ))
3320a68fe89cSPaul Brook                 return -1;
3321579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
332272fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3323a68fe89cSPaul Brook                 return -1;
332472fb7daaSaurel32             memcpy(buf, p, l);
33255b257578Saurel32             unlock_user(p, addr, 0);
332613eb76e0Sbellard         }
332713eb76e0Sbellard         len -= l;
332813eb76e0Sbellard         buf += l;
332913eb76e0Sbellard         addr += l;
333013eb76e0Sbellard     }
3331a68fe89cSPaul Brook     return 0;
333213eb76e0Sbellard }
33338df1cd07Sbellard 
333413eb76e0Sbellard #else
333551d7a9ebSAnthony PERARD 
333651d7a9ebSAnthony PERARD static void invalidate_and_set_dirty(target_phys_addr_t addr,
333751d7a9ebSAnthony PERARD                                      target_phys_addr_t length)
333851d7a9ebSAnthony PERARD {
333951d7a9ebSAnthony PERARD     if (!cpu_physical_memory_is_dirty(addr)) {
334051d7a9ebSAnthony PERARD         /* invalidate code */
334151d7a9ebSAnthony PERARD         tb_invalidate_phys_page_range(addr, addr + length, 0);
334251d7a9ebSAnthony PERARD         /* set dirty bit */
334351d7a9ebSAnthony PERARD         cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
334451d7a9ebSAnthony PERARD     }
3345e226939dSAnthony PERARD     xen_modified_memory(addr, length);
334651d7a9ebSAnthony PERARD }
334751d7a9ebSAnthony PERARD 
3348ac1970fbSAvi Kivity void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
3349ac1970fbSAvi Kivity                       int len, bool is_write)
335013eb76e0Sbellard {
3351ac1970fbSAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
335237ec01d4SAvi Kivity     int l;
335313eb76e0Sbellard     uint8_t *ptr;
335413eb76e0Sbellard     uint32_t val;
3355c227f099SAnthony Liguori     target_phys_addr_t page;
3356f3705d53SAvi Kivity     MemoryRegionSection *section;
335713eb76e0Sbellard 
335813eb76e0Sbellard     while (len > 0) {
335913eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
336013eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
336113eb76e0Sbellard         if (l > len)
336213eb76e0Sbellard             l = len;
3363ac1970fbSAvi Kivity         section = phys_page_find(d, page >> TARGET_PAGE_BITS);
336413eb76e0Sbellard 
336513eb76e0Sbellard         if (is_write) {
3366f3705d53SAvi Kivity             if (!memory_region_is_ram(section->mr)) {
3367f1f6e3b8SAvi Kivity                 target_phys_addr_t addr1;
3368cc5bea60SBlue Swirl                 addr1 = memory_region_section_addr(section, addr);
33696a00d601Sbellard                 /* XXX: could force cpu_single_env to NULL to avoid
33706a00d601Sbellard                    potential bugs */
33716c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
33721c213d19Sbellard                     /* 32 bit write access */
3373c27004ecSbellard                     val = ldl_p(buf);
337437ec01d4SAvi Kivity                     io_mem_write(section->mr, addr1, val, 4);
337513eb76e0Sbellard                     l = 4;
33766c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
33771c213d19Sbellard                     /* 16 bit write access */
3378c27004ecSbellard                     val = lduw_p(buf);
337937ec01d4SAvi Kivity                     io_mem_write(section->mr, addr1, val, 2);
338013eb76e0Sbellard                     l = 2;
338113eb76e0Sbellard                 } else {
33821c213d19Sbellard                     /* 8 bit write access */
3383c27004ecSbellard                     val = ldub_p(buf);
338437ec01d4SAvi Kivity                     io_mem_write(section->mr, addr1, val, 1);
338513eb76e0Sbellard                     l = 1;
338613eb76e0Sbellard                 }
3387f3705d53SAvi Kivity             } else if (!section->readonly) {
33888ca5692dSAnthony PERARD                 ram_addr_t addr1;
3389f3705d53SAvi Kivity                 addr1 = memory_region_get_ram_addr(section->mr)
3390cc5bea60SBlue Swirl                     + memory_region_section_addr(section, addr);
339113eb76e0Sbellard                 /* RAM case */
33925579c7f3Spbrook                 ptr = qemu_get_ram_ptr(addr1);
339313eb76e0Sbellard                 memcpy(ptr, buf, l);
339451d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
3395050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
33963a7d929eSbellard             }
339713eb76e0Sbellard         } else {
3398cc5bea60SBlue Swirl             if (!(memory_region_is_ram(section->mr) ||
3399cc5bea60SBlue Swirl                   memory_region_is_romd(section->mr))) {
3400f1f6e3b8SAvi Kivity                 target_phys_addr_t addr1;
340113eb76e0Sbellard                 /* I/O case */
3402cc5bea60SBlue Swirl                 addr1 = memory_region_section_addr(section, addr);
34036c2934dbSaurel32                 if (l >= 4 && ((addr1 & 3) == 0)) {
340413eb76e0Sbellard                     /* 32 bit read access */
340537ec01d4SAvi Kivity                     val = io_mem_read(section->mr, addr1, 4);
3406c27004ecSbellard                     stl_p(buf, val);
340713eb76e0Sbellard                     l = 4;
34086c2934dbSaurel32                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
340913eb76e0Sbellard                     /* 16 bit read access */
341037ec01d4SAvi Kivity                     val = io_mem_read(section->mr, addr1, 2);
3411c27004ecSbellard                     stw_p(buf, val);
341213eb76e0Sbellard                     l = 2;
341313eb76e0Sbellard                 } else {
34141c213d19Sbellard                     /* 8 bit read access */
341537ec01d4SAvi Kivity                     val = io_mem_read(section->mr, addr1, 1);
3416c27004ecSbellard                     stb_p(buf, val);
341713eb76e0Sbellard                     l = 1;
341813eb76e0Sbellard                 }
341913eb76e0Sbellard             } else {
342013eb76e0Sbellard                 /* RAM case */
34210a1b357fSAnthony PERARD                 ptr = qemu_get_ram_ptr(section->mr->ram_addr
3422cc5bea60SBlue Swirl                                        + memory_region_section_addr(section,
3423cc5bea60SBlue Swirl                                                                     addr));
3424f3705d53SAvi Kivity                 memcpy(buf, ptr, l);
3425050a0ddfSAnthony PERARD                 qemu_put_ram_ptr(ptr);
342613eb76e0Sbellard             }
342713eb76e0Sbellard         }
342813eb76e0Sbellard         len -= l;
342913eb76e0Sbellard         buf += l;
343013eb76e0Sbellard         addr += l;
343113eb76e0Sbellard     }
343213eb76e0Sbellard }
34338df1cd07Sbellard 
3434ac1970fbSAvi Kivity void address_space_write(AddressSpace *as, target_phys_addr_t addr,
3435ac1970fbSAvi Kivity                          const uint8_t *buf, int len)
3436ac1970fbSAvi Kivity {
3437ac1970fbSAvi Kivity     address_space_rw(as, addr, (uint8_t *)buf, len, true);
3438ac1970fbSAvi Kivity }
3439ac1970fbSAvi Kivity 
3440ac1970fbSAvi Kivity /**
3441ac1970fbSAvi Kivity  * address_space_read: read from an address space.
3442ac1970fbSAvi Kivity  *
3443ac1970fbSAvi Kivity  * @as: #AddressSpace to be accessed
3444ac1970fbSAvi Kivity  * @addr: address within that address space
3445ac1970fbSAvi Kivity  * @buf: buffer with the data transferred
 * @len: length of the data transferred, in bytes
3446ac1970fbSAvi Kivity  */
3447ac1970fbSAvi Kivity void address_space_read(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, int len)
3448ac1970fbSAvi Kivity {
3449ac1970fbSAvi Kivity     address_space_rw(as, addr, buf, len, false);
3450ac1970fbSAvi Kivity }
3451ac1970fbSAvi Kivity 
3452ac1970fbSAvi Kivity 
3453ac1970fbSAvi Kivity void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3454ac1970fbSAvi Kivity                             int len, int is_write)
3455ac1970fbSAvi Kivity {
3456ac1970fbSAvi Kivity     return address_space_rw(&address_space_memory, addr, buf, len, is_write);
3457ac1970fbSAvi Kivity }
3458ac1970fbSAvi Kivity 
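/*
 * Most callers go through these wrappers; e.g. poking a value into guest
 * RAM and reading it back (an illustrative sketch only):
 */
#if 0
static uint32_t example_guest_poke(target_phys_addr_t gpa)
{
    uint32_t v = 0x12345678;

    cpu_physical_memory_write(gpa, &v, sizeof(v));
    cpu_physical_memory_read(gpa, &v, sizeof(v));
    return v;
}
#endif
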
3459d0ecd2aaSbellard /* used for ROM loading: can write to RAM and ROM */
3460c227f099SAnthony Liguori void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3461d0ecd2aaSbellard                                    const uint8_t *buf, int len)
3462d0ecd2aaSbellard {
3463ac1970fbSAvi Kivity     AddressSpaceDispatch *d = address_space_memory.dispatch;
3464d0ecd2aaSbellard     int l;
3465d0ecd2aaSbellard     uint8_t *ptr;
3466c227f099SAnthony Liguori     target_phys_addr_t page;
3467f3705d53SAvi Kivity     MemoryRegionSection *section;
3468d0ecd2aaSbellard 
3469d0ecd2aaSbellard     while (len > 0) {
3470d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
3471d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
3472d0ecd2aaSbellard         if (l > len)
3473d0ecd2aaSbellard             l = len;
3474ac1970fbSAvi Kivity         section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3475d0ecd2aaSbellard 
3476cc5bea60SBlue Swirl         if (!(memory_region_is_ram(section->mr) ||
3477cc5bea60SBlue Swirl               memory_region_is_romd(section->mr))) {
3478d0ecd2aaSbellard             /* do nothing */
3479d0ecd2aaSbellard         } else {
3480d0ecd2aaSbellard             unsigned long addr1;
3481f3705d53SAvi Kivity             addr1 = memory_region_get_ram_addr(section->mr)
3482cc5bea60SBlue Swirl                 + memory_region_section_addr(section, addr);
3483d0ecd2aaSbellard             /* ROM/RAM case */
34845579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
3485d0ecd2aaSbellard             memcpy(ptr, buf, l);
348651d7a9ebSAnthony PERARD             invalidate_and_set_dirty(addr1, l);
3487050a0ddfSAnthony PERARD             qemu_put_ram_ptr(ptr);
3488d0ecd2aaSbellard         }
3489d0ecd2aaSbellard         len -= l;
3490d0ecd2aaSbellard         buf += l;
3491d0ecd2aaSbellard         addr += l;
3492d0ecd2aaSbellard     }
3493d0ecd2aaSbellard }
3494d0ecd2aaSbellard 
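/*
 * This is what firmware/ROM loaders use, so the copy lands even in
 * regions that a guest store would leave untouched.  An illustrative
 * sketch:
 */
#if 0
static void example_load_firmware(const uint8_t *blob, int size,
                                  target_phys_addr_t base)
{
    /* a plain cpu_physical_memory_write() is dropped for ROM pages */
    cpu_physical_memory_write_rom(base, blob, size);
}
#endif
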
34956d16c2f8Saliguori typedef struct {
34966d16c2f8Saliguori     void *buffer;
3497c227f099SAnthony Liguori     target_phys_addr_t addr;
3498c227f099SAnthony Liguori     target_phys_addr_t len;
34996d16c2f8Saliguori } BounceBuffer;
35006d16c2f8Saliguori 
35016d16c2f8Saliguori static BounceBuffer bounce;
35026d16c2f8Saliguori 
3503ba223c29Saliguori typedef struct MapClient {
3504ba223c29Saliguori     void *opaque;
3505ba223c29Saliguori     void (*callback)(void *opaque);
350672cf2d4fSBlue Swirl     QLIST_ENTRY(MapClient) link;
3507ba223c29Saliguori } MapClient;
3508ba223c29Saliguori 
350972cf2d4fSBlue Swirl static QLIST_HEAD(map_client_list, MapClient) map_client_list
351072cf2d4fSBlue Swirl     = QLIST_HEAD_INITIALIZER(map_client_list);
3511ba223c29Saliguori 
3512ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3513ba223c29Saliguori {
35147267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
3515ba223c29Saliguori 
3516ba223c29Saliguori     client->opaque = opaque;
3517ba223c29Saliguori     client->callback = callback;
351872cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
3519ba223c29Saliguori     return client;
3520ba223c29Saliguori }
3521ba223c29Saliguori 
3522ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3523ba223c29Saliguori {
3524ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3525ba223c29Saliguori 
352672cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
35277267c094SAnthony Liguori     g_free(client);
3528ba223c29Saliguori }
3529ba223c29Saliguori 
3530ba223c29Saliguori static void cpu_notify_map_clients(void)
3531ba223c29Saliguori {
3532ba223c29Saliguori     MapClient *client;
3533ba223c29Saliguori 
353472cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
353572cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
3536ba223c29Saliguori         client->callback(client->opaque);
353734d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
3538ba223c29Saliguori     }
3539ba223c29Saliguori }
3540ba223c29Saliguori 
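/*
 * The map-client list implements the retry protocol promised by
 * address_space_map() below: a caller that got NULL registers a callback
 * and retries once the bounce buffer is released.  A sketch with
 * illustrative "mydma" names:
 */
#if 0
static void mydma_map_retry(void *opaque)
{
    /* invoked from cpu_notify_map_clients(); restart the transfer */
}

static void mydma_kick(void *opaque)
{
    target_phys_addr_t plen = 4096;
    void *p = cpu_physical_memory_map(0x1000, &plen, 1);

    if (!p) {
        /* mapping resources exhausted; queue ourselves for a retry */
        cpu_register_map_client(opaque, mydma_map_retry);
        return;
    }
    /* ... fill p[0..plen) ... */
    cpu_physical_memory_unmap(p, plen, 1, plen);
}
#endif
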
35416d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
35426d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
35436d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
35446d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
3545ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
3546ba223c29Saliguori  * likely to succeed.
35476d16c2f8Saliguori  */
3548ac1970fbSAvi Kivity void *address_space_map(AddressSpace *as,
3549ac1970fbSAvi Kivity                         target_phys_addr_t addr,
3550c227f099SAnthony Liguori                         target_phys_addr_t *plen,
3551ac1970fbSAvi Kivity                         bool is_write)
35526d16c2f8Saliguori {
3553ac1970fbSAvi Kivity     AddressSpaceDispatch *d = as->dispatch;
3554c227f099SAnthony Liguori     target_phys_addr_t len = *plen;
355538bee5dcSStefano Stabellini     target_phys_addr_t todo = 0;
35566d16c2f8Saliguori     int l;
3557c227f099SAnthony Liguori     target_phys_addr_t page;
3558f3705d53SAvi Kivity     MemoryRegionSection *section;
3559f15fbc4bSAnthony PERARD     ram_addr_t raddr = RAM_ADDR_MAX;
35608ab934f9SStefano Stabellini     ram_addr_t rlen;
35618ab934f9SStefano Stabellini     void *ret;
35626d16c2f8Saliguori 
35636d16c2f8Saliguori     while (len > 0) {
35646d16c2f8Saliguori         page = addr & TARGET_PAGE_MASK;
35656d16c2f8Saliguori         l = (page + TARGET_PAGE_SIZE) - addr;
35666d16c2f8Saliguori         if (l > len)
35676d16c2f8Saliguori             l = len;
3568ac1970fbSAvi Kivity         section = phys_page_find(d, page >> TARGET_PAGE_BITS);
35696d16c2f8Saliguori 
3570f3705d53SAvi Kivity         if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
357138bee5dcSStefano Stabellini             if (todo || bounce.buffer) {
35726d16c2f8Saliguori                 break;
35736d16c2f8Saliguori             }
35746d16c2f8Saliguori             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
35756d16c2f8Saliguori             bounce.addr = addr;
35766d16c2f8Saliguori             bounce.len = l;
35776d16c2f8Saliguori             if (!is_write) {
3578ac1970fbSAvi Kivity                 address_space_read(as, addr, bounce.buffer, l);
35796d16c2f8Saliguori             }
358038bee5dcSStefano Stabellini 
358138bee5dcSStefano Stabellini             *plen = l;
358238bee5dcSStefano Stabellini             return bounce.buffer;
35836d16c2f8Saliguori         }
35848ab934f9SStefano Stabellini         if (!todo) {
3585f3705d53SAvi Kivity             raddr = memory_region_get_ram_addr(section->mr)
3586cc5bea60SBlue Swirl                 + memory_region_section_addr(section, addr);
35878ab934f9SStefano Stabellini         }
35886d16c2f8Saliguori 
35896d16c2f8Saliguori         len -= l;
35906d16c2f8Saliguori         addr += l;
359138bee5dcSStefano Stabellini         todo += l;
35926d16c2f8Saliguori     }
35938ab934f9SStefano Stabellini     rlen = todo;
35948ab934f9SStefano Stabellini     ret = qemu_ram_ptr_length(raddr, &rlen);
35958ab934f9SStefano Stabellini     *plen = rlen;
35968ab934f9SStefano Stabellini     return ret;
35976d16c2f8Saliguori }
35986d16c2f8Saliguori 
3599ac1970fbSAvi Kivity /* Unmaps a memory region previously mapped by address_space_map().
36006d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
36016d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
36026d16c2f8Saliguori  */
3603ac1970fbSAvi Kivity void address_space_unmap(AddressSpace *as, void *buffer, target_phys_addr_t len,
3604c227f099SAnthony Liguori                          int is_write, target_phys_addr_t access_len)
36056d16c2f8Saliguori {
36066d16c2f8Saliguori     if (buffer != bounce.buffer) {
36076d16c2f8Saliguori         if (is_write) {
3608e890261fSMarcelo Tosatti             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
36096d16c2f8Saliguori             while (access_len) {
36106d16c2f8Saliguori                 unsigned l;
36116d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
36126d16c2f8Saliguori                 if (l > access_len)
36136d16c2f8Saliguori                     l = access_len;
361451d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
36156d16c2f8Saliguori                 addr1 += l;
36166d16c2f8Saliguori                 access_len -= l;
36176d16c2f8Saliguori             }
36186d16c2f8Saliguori         }
3619868bb33fSJan Kiszka         if (xen_enabled()) {
3620e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
3621050a0ddfSAnthony PERARD         }
36226d16c2f8Saliguori         return;
36236d16c2f8Saliguori     }
36246d16c2f8Saliguori     if (is_write) {
3625ac1970fbSAvi Kivity         address_space_write(as, bounce.addr, bounce.buffer, access_len);
36266d16c2f8Saliguori     }
3627f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
36286d16c2f8Saliguori     bounce.buffer = NULL;
3629ba223c29Saliguori     cpu_notify_map_clients();
36306d16c2f8Saliguori }
3631d0ecd2aaSbellard 
3632ac1970fbSAvi Kivity void *cpu_physical_memory_map(target_phys_addr_t addr,
3633ac1970fbSAvi Kivity                               target_phys_addr_t *plen,
3634ac1970fbSAvi Kivity                               int is_write)
3635ac1970fbSAvi Kivity {
3636ac1970fbSAvi Kivity     return address_space_map(&address_space_memory, addr, plen, is_write);
3637ac1970fbSAvi Kivity }
3638ac1970fbSAvi Kivity 
3639ac1970fbSAvi Kivity void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3640ac1970fbSAvi Kivity                                int is_write, target_phys_addr_t access_len)
3641ac1970fbSAvi Kivity {
3642ac1970fbSAvi Kivity     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3643ac1970fbSAvi Kivity }
3644ac1970fbSAvi Kivity 
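/*
 * Putting map and unmap together: a zero-copy transfer loop must cope
 * with *plen coming back smaller than requested, since a mapping ends at
 * the first page that has to be bounced.  An illustrative sketch:
 */
#if 0
static void example_dma_to_guest(target_phys_addr_t addr,
                                 const uint8_t *data,
                                 target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *p = cpu_physical_memory_map(addr, &plen, 1);

        if (!p) {
            break;          /* exhausted; see cpu_register_map_client() */
        }
        memcpy(p, data, plen);
        cpu_physical_memory_unmap(p, plen, 1, plen);
        addr += plen;
        data += plen;
        len -= plen;
    }
}
#endif
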
36458df1cd07Sbellard /* warning: addr must be aligned */
36461e78bcc1SAlexander Graf static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
36471e78bcc1SAlexander Graf                                          enum device_endian endian)
36488df1cd07Sbellard {
36498df1cd07Sbellard     uint8_t *ptr;
36508df1cd07Sbellard     uint32_t val;
3651f3705d53SAvi Kivity     MemoryRegionSection *section;
36528df1cd07Sbellard 
3653ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
36548df1cd07Sbellard 
3655cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3656cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
36578df1cd07Sbellard         /* I/O case */
3658cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
365937ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4);
36601e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
36611e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
36621e78bcc1SAlexander Graf             val = bswap32(val);
36631e78bcc1SAlexander Graf         }
36641e78bcc1SAlexander Graf #else
36651e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
36661e78bcc1SAlexander Graf             val = bswap32(val);
36671e78bcc1SAlexander Graf         }
36681e78bcc1SAlexander Graf #endif
36698df1cd07Sbellard     } else {
36708df1cd07Sbellard         /* RAM case */
3671f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
367206ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3673cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
36741e78bcc1SAlexander Graf         switch (endian) {
36751e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
36761e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
36771e78bcc1SAlexander Graf             break;
36781e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
36791e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
36801e78bcc1SAlexander Graf             break;
36811e78bcc1SAlexander Graf         default:
36828df1cd07Sbellard             val = ldl_p(ptr);
36831e78bcc1SAlexander Graf             break;
36841e78bcc1SAlexander Graf         }
36858df1cd07Sbellard     }
36868df1cd07Sbellard     return val;
36878df1cd07Sbellard }
36888df1cd07Sbellard 
36891e78bcc1SAlexander Graf uint32_t ldl_phys(target_phys_addr_t addr)
36901e78bcc1SAlexander Graf {
36911e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
36921e78bcc1SAlexander Graf }
36931e78bcc1SAlexander Graf 
36941e78bcc1SAlexander Graf uint32_t ldl_le_phys(target_phys_addr_t addr)
36951e78bcc1SAlexander Graf {
36961e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
36971e78bcc1SAlexander Graf }
36981e78bcc1SAlexander Graf 
36991e78bcc1SAlexander Graf uint32_t ldl_be_phys(target_phys_addr_t addr)
37001e78bcc1SAlexander Graf {
37011e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
37021e78bcc1SAlexander Graf }
37031e78bcc1SAlexander Graf 
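/*
 * The fixed-endianness variants let device code read guest structures
 * whose layout is defined by hardware, independent of the target's byte
 * order; e.g. a little-endian descriptor field (illustrative offset):
 */
#if 0
static uint32_t example_read_le_desc_field(target_phys_addr_t desc)
{
    return ldl_le_phys(desc + 4);   /* field is LE by specification */
}
#endif
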
370484b7b8e7Sbellard /* warning: addr must be aligned */
37051e78bcc1SAlexander Graf static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
37061e78bcc1SAlexander Graf                                          enum device_endian endian)
370784b7b8e7Sbellard {
370884b7b8e7Sbellard     uint8_t *ptr;
370984b7b8e7Sbellard     uint64_t val;
3710f3705d53SAvi Kivity     MemoryRegionSection *section;
371184b7b8e7Sbellard 
3712ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
371384b7b8e7Sbellard 
3714cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3715cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
371684b7b8e7Sbellard         /* I/O case */
3717cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
37181e78bcc1SAlexander Graf 
37191e78bcc1SAlexander Graf         /* XXX: this is broken when the device endianness differs from
37201e78bcc1SAlexander Graf            the CPU endianness; fix it to honour the "endian" argument. */
372184b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
372237ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4) << 32;
372337ec01d4SAvi Kivity         val |= io_mem_read(section->mr, addr + 4, 4);
372484b7b8e7Sbellard #else
372537ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4);
372637ec01d4SAvi Kivity         val |= io_mem_read(section->mr, addr + 4, 4) << 32;
372784b7b8e7Sbellard #endif
372884b7b8e7Sbellard     } else {
372984b7b8e7Sbellard         /* RAM case */
3730f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
373106ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3732cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
37331e78bcc1SAlexander Graf         switch (endian) {
37341e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
37351e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
37361e78bcc1SAlexander Graf             break;
37371e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
37381e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
37391e78bcc1SAlexander Graf             break;
37401e78bcc1SAlexander Graf         default:
374184b7b8e7Sbellard             val = ldq_p(ptr);
37421e78bcc1SAlexander Graf             break;
37431e78bcc1SAlexander Graf         }
374484b7b8e7Sbellard     }
374584b7b8e7Sbellard     return val;
374684b7b8e7Sbellard }
374784b7b8e7Sbellard 
37481e78bcc1SAlexander Graf uint64_t ldq_phys(target_phys_addr_t addr)
37491e78bcc1SAlexander Graf {
37501e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
37511e78bcc1SAlexander Graf }
37521e78bcc1SAlexander Graf 
37531e78bcc1SAlexander Graf uint64_t ldq_le_phys(target_phys_addr_t addr)
37541e78bcc1SAlexander Graf {
37551e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
37561e78bcc1SAlexander Graf }
37571e78bcc1SAlexander Graf 
37581e78bcc1SAlexander Graf uint64_t ldq_be_phys(target_phys_addr_t addr)
37591e78bcc1SAlexander Graf {
37601e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
37611e78bcc1SAlexander Graf }
37621e78bcc1SAlexander Graf 
3763aab33094Sbellard /* XXX: optimize */
3764c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
3765aab33094Sbellard {
3766aab33094Sbellard     uint8_t val;
3767aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3768aab33094Sbellard     return val;
3769aab33094Sbellard }
3770aab33094Sbellard 
3771733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
37721e78bcc1SAlexander Graf static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
37731e78bcc1SAlexander Graf                                           enum device_endian endian)
3774aab33094Sbellard {
3775733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3776733f0b02SMichael S. Tsirkin     uint64_t val;
3777f3705d53SAvi Kivity     MemoryRegionSection *section;
3778733f0b02SMichael S. Tsirkin 
3779ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3780733f0b02SMichael S. Tsirkin 
3781cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3782cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
3783733f0b02SMichael S. Tsirkin         /* I/O case */
3784cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
378537ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 2);
37861e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
37871e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
37881e78bcc1SAlexander Graf             val = bswap16(val);
37891e78bcc1SAlexander Graf         }
37901e78bcc1SAlexander Graf #else
37911e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
37921e78bcc1SAlexander Graf             val = bswap16(val);
37931e78bcc1SAlexander Graf         }
37941e78bcc1SAlexander Graf #endif
3795733f0b02SMichael S. Tsirkin     } else {
3796733f0b02SMichael S. Tsirkin         /* RAM case */
3797f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
379806ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3799cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
38001e78bcc1SAlexander Graf         switch (endian) {
38011e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
38021e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
38031e78bcc1SAlexander Graf             break;
38041e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
38051e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
38061e78bcc1SAlexander Graf             break;
38071e78bcc1SAlexander Graf         default:
3808733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
38091e78bcc1SAlexander Graf             break;
38101e78bcc1SAlexander Graf         }
3811733f0b02SMichael S. Tsirkin     }
3812733f0b02SMichael S. Tsirkin     return val;
3813aab33094Sbellard }
3814aab33094Sbellard 
38151e78bcc1SAlexander Graf uint32_t lduw_phys(target_phys_addr_t addr)
38161e78bcc1SAlexander Graf {
38171e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
38181e78bcc1SAlexander Graf }
38191e78bcc1SAlexander Graf 
38201e78bcc1SAlexander Graf uint32_t lduw_le_phys(target_phys_addr_t addr)
38211e78bcc1SAlexander Graf {
38221e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
38231e78bcc1SAlexander Graf }
38241e78bcc1SAlexander Graf 
38251e78bcc1SAlexander Graf uint32_t lduw_be_phys(target_phys_addr_t addr)
38261e78bcc1SAlexander Graf {
38271e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
38281e78bcc1SAlexander Graf }
38291e78bcc1SAlexander Graf 
38308df1cd07Sbellard /* warning: addr must be aligned.  The RAM page is not marked as dirty
38318df1cd07Sbellard    and the code inside is not invalidated.  This is useful when the dirty
38328df1cd07Sbellard    bits are used to track modified PTEs. */
3833c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
38348df1cd07Sbellard {
38358df1cd07Sbellard     uint8_t *ptr;
3836f3705d53SAvi Kivity     MemoryRegionSection *section;
38378df1cd07Sbellard 
3838ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
38398df1cd07Sbellard 
3840f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3841cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
384237ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
384337ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
384437ec01d4SAvi Kivity         }
384537ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 4);
38468df1cd07Sbellard     } else {
3847f3705d53SAvi Kivity         unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
384806ef3525SAvi Kivity                                & TARGET_PAGE_MASK)
3849cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
38505579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
38518df1cd07Sbellard         stl_p(ptr, val);
385274576198Saliguori 
385374576198Saliguori         if (unlikely(in_migration)) {
385474576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
385574576198Saliguori                 /* invalidate code */
385674576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
385774576198Saliguori                 /* set dirty bit */
3858f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
3859f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
386074576198Saliguori             }
386174576198Saliguori         }
38628df1cd07Sbellard     }
38638df1cd07Sbellard }
38648df1cd07Sbellard 
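/*
 * A sketch of the PTE case described above: target MMU code can set
 * accessed/dirty bits in a guest page table entry without flagging the
 * page as dirty (the bit positions here are x86-style, for illustration):
 */
#if 0
static void example_pte_set_ad(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 | 0x40);   /* A and D bits */
}
#endif
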
3865c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3866bc98a7efSj_mayer {
3867bc98a7efSj_mayer     uint8_t *ptr;
3868f3705d53SAvi Kivity     MemoryRegionSection *section;
3869bc98a7efSj_mayer 
3870ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3871bc98a7efSj_mayer 
3872f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3873cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
387437ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
387537ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
387637ec01d4SAvi Kivity         }
3877bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
387837ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val >> 32, 4);
387937ec01d4SAvi Kivity         io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
3880bc98a7efSj_mayer #else
388137ec01d4SAvi Kivity         io_mem_write(section->mr, addr, (uint32_t)val, 4);
388237ec01d4SAvi Kivity         io_mem_write(section->mr, addr + 4, val >> 32, 4);
3883bc98a7efSj_mayer #endif
3884bc98a7efSj_mayer     } else {
3885f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
388606ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3887cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
3888bc98a7efSj_mayer         stq_p(ptr, val);
3889bc98a7efSj_mayer     }
3890bc98a7efSj_mayer }
3891bc98a7efSj_mayer 
38928df1cd07Sbellard /* warning: addr must be aligned */
38931e78bcc1SAlexander Graf static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
38941e78bcc1SAlexander Graf                                      enum device_endian endian)
38958df1cd07Sbellard {
38968df1cd07Sbellard     uint8_t *ptr;
3897f3705d53SAvi Kivity     MemoryRegionSection *section;
38988df1cd07Sbellard 
3899ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
39008df1cd07Sbellard 
3901f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3902cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
390337ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
390437ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
390537ec01d4SAvi Kivity         }
39061e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
39071e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
39081e78bcc1SAlexander Graf             val = bswap32(val);
39091e78bcc1SAlexander Graf         }
39101e78bcc1SAlexander Graf #else
39111e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
39121e78bcc1SAlexander Graf             val = bswap32(val);
39131e78bcc1SAlexander Graf         }
39141e78bcc1SAlexander Graf #endif
391537ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 4);
39168df1cd07Sbellard     } else {
39178df1cd07Sbellard         unsigned long addr1;
3918f3705d53SAvi Kivity         addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
3919cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
39208df1cd07Sbellard         /* RAM case */
39215579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
39221e78bcc1SAlexander Graf         switch (endian) {
39231e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
39241e78bcc1SAlexander Graf             stl_le_p(ptr, val);
39251e78bcc1SAlexander Graf             break;
39261e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
39271e78bcc1SAlexander Graf             stl_be_p(ptr, val);
39281e78bcc1SAlexander Graf             break;
39291e78bcc1SAlexander Graf         default:
39308df1cd07Sbellard             stl_p(ptr, val);
39311e78bcc1SAlexander Graf             break;
39321e78bcc1SAlexander Graf         }
393351d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 4);
39348df1cd07Sbellard     }
39353a7d929eSbellard }
39368df1cd07Sbellard 
39371e78bcc1SAlexander Graf void stl_phys(target_phys_addr_t addr, uint32_t val)
39381e78bcc1SAlexander Graf {
39391e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
39401e78bcc1SAlexander Graf }
39411e78bcc1SAlexander Graf 
39421e78bcc1SAlexander Graf void stl_le_phys(target_phys_addr_t addr, uint32_t val)
39431e78bcc1SAlexander Graf {
39441e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
39451e78bcc1SAlexander Graf }
39461e78bcc1SAlexander Graf 
39471e78bcc1SAlexander Graf void stl_be_phys(target_phys_addr_t addr, uint32_t val)
39481e78bcc1SAlexander Graf {
39491e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
39501e78bcc1SAlexander Graf }
39511e78bcc1SAlexander Graf 
3952aab33094Sbellard /* XXX: optimize */
3953c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
3954aab33094Sbellard {
3955aab33094Sbellard     uint8_t v = val;
3956aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
3957aab33094Sbellard }
3958aab33094Sbellard 
3959733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
39601e78bcc1SAlexander Graf static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
39611e78bcc1SAlexander Graf                                      enum device_endian endian)
3962aab33094Sbellard {
3963733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3964f3705d53SAvi Kivity     MemoryRegionSection *section;
3965733f0b02SMichael S. Tsirkin 
3966ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3967733f0b02SMichael S. Tsirkin 
3968f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3969cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
397037ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
397137ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
397237ec01d4SAvi Kivity         }
39731e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
39741e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
39751e78bcc1SAlexander Graf             val = bswap16(val);
39761e78bcc1SAlexander Graf         }
39771e78bcc1SAlexander Graf #else
39781e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
39791e78bcc1SAlexander Graf             val = bswap16(val);
39801e78bcc1SAlexander Graf         }
39811e78bcc1SAlexander Graf #endif
398237ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 2);
3983733f0b02SMichael S. Tsirkin     } else {
3984733f0b02SMichael S. Tsirkin         unsigned long addr1;
3985f3705d53SAvi Kivity         addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
3986cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
3987733f0b02SMichael S. Tsirkin         /* RAM case */
3988733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
39891e78bcc1SAlexander Graf         switch (endian) {
39901e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
39911e78bcc1SAlexander Graf             stw_le_p(ptr, val);
39921e78bcc1SAlexander Graf             break;
39931e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
39941e78bcc1SAlexander Graf             stw_be_p(ptr, val);
39951e78bcc1SAlexander Graf             break;
39961e78bcc1SAlexander Graf         default:
3997733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
39981e78bcc1SAlexander Graf             break;
39991e78bcc1SAlexander Graf         }
400051d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 2);
4001733f0b02SMichael S. Tsirkin     }
4002aab33094Sbellard }
4003aab33094Sbellard 
40041e78bcc1SAlexander Graf void stw_phys(target_phys_addr_t addr, uint32_t val)
40051e78bcc1SAlexander Graf {
40061e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
40071e78bcc1SAlexander Graf }
40081e78bcc1SAlexander Graf 
40091e78bcc1SAlexander Graf void stw_le_phys(target_phys_addr_t addr, uint32_t val)
40101e78bcc1SAlexander Graf {
40111e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
40121e78bcc1SAlexander Graf }
40131e78bcc1SAlexander Graf 
40141e78bcc1SAlexander Graf void stw_be_phys(target_phys_addr_t addr, uint32_t val)
40151e78bcc1SAlexander Graf {
40161e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
40171e78bcc1SAlexander Graf }
40181e78bcc1SAlexander Graf 
4019aab33094Sbellard /* XXX: optimize */
4020c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
4021aab33094Sbellard {
4022aab33094Sbellard     val = tswap64(val);
402371d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
4024aab33094Sbellard }
4025aab33094Sbellard 
40261e78bcc1SAlexander Graf void stq_le_phys(target_phys_addr_t addr, uint64_t val)
40271e78bcc1SAlexander Graf {
40281e78bcc1SAlexander Graf     val = cpu_to_le64(val);
40291e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
40301e78bcc1SAlexander Graf }
40311e78bcc1SAlexander Graf 
40321e78bcc1SAlexander Graf void stq_be_phys(target_phys_addr_t addr, uint64_t val)
40331e78bcc1SAlexander Graf {
40341e78bcc1SAlexander Graf     val = cpu_to_be64(val);
40351e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
40361e78bcc1SAlexander Graf }
40371e78bcc1SAlexander Graf 
40385e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
40399349b4f9SAndreas Färber int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
4040b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
404113eb76e0Sbellard {
404213eb76e0Sbellard     int l;
4043c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
40449b3c35e0Sj_mayer     target_ulong page;
404513eb76e0Sbellard 
404613eb76e0Sbellard     while (len > 0) {
404713eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
404813eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
404913eb76e0Sbellard         /* if no physical page mapped, return an error */
405013eb76e0Sbellard         if (phys_addr == -1)
405113eb76e0Sbellard             return -1;
405213eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
405313eb76e0Sbellard         if (l > len)
405413eb76e0Sbellard             l = len;
40555e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
40565e2972fdSaliguori         if (is_write)
40575e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
40585e2972fdSaliguori         else
40595e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
406013eb76e0Sbellard         len -= l;
406113eb76e0Sbellard         buf += l;
406213eb76e0Sbellard         addr += l;
406313eb76e0Sbellard     }
406413eb76e0Sbellard     return 0;
406513eb76e0Sbellard }
4066a68fe89cSPaul Brook #endif
406713eb76e0Sbellard 
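/*
 * cpu_memory_rw_debug() is the entry point used by debugger stubs: it
 * translates guest virtual addresses page by page and may even write to
 * ROM.  An illustrative wrapper:
 */
#if 0
static int example_debug_read(CPUArchState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}
#endif
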
40682e70f6efSpbrook /* In deterministic execution mode, instructions doing device I/O
40692e70f6efSpbrook    must be at the end of the TB. */
407020503968SBlue Swirl void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
40712e70f6efSpbrook {
40722e70f6efSpbrook     TranslationBlock *tb;
40732e70f6efSpbrook     uint32_t n, cflags;
40742e70f6efSpbrook     target_ulong pc, cs_base;
40752e70f6efSpbrook     uint64_t flags;
40762e70f6efSpbrook 
407720503968SBlue Swirl     tb = tb_find_pc(retaddr);
40782e70f6efSpbrook     if (!tb) {
40792e70f6efSpbrook         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
408020503968SBlue Swirl                   (void *)retaddr);
40812e70f6efSpbrook     }
40822e70f6efSpbrook     n = env->icount_decr.u16.low + tb->icount;
408320503968SBlue Swirl     cpu_restore_state(tb, env, retaddr);
40842e70f6efSpbrook     /* Calculate how many instructions had been executed before the fault
4085bf20dc07Sths        occurred.  */
40862e70f6efSpbrook     n = n - env->icount_decr.u16.low;
40872e70f6efSpbrook     /* Generate a new TB ending on the I/O insn.  */
40882e70f6efSpbrook     n++;
40892e70f6efSpbrook     /* On MIPS and SH, delay slot instructions can only be restarted if
40902e70f6efSpbrook        they were already the first instruction in the TB.  If this is not
4091bf20dc07Sths        the first instruction in a TB then re-execute the preceding
40922e70f6efSpbrook        branch.  */
40932e70f6efSpbrook #if defined(TARGET_MIPS)
40942e70f6efSpbrook     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
40952e70f6efSpbrook         env->active_tc.PC -= 4;
40962e70f6efSpbrook         env->icount_decr.u16.low++;
40972e70f6efSpbrook         env->hflags &= ~MIPS_HFLAG_BMASK;
40982e70f6efSpbrook     }
40992e70f6efSpbrook #elif defined(TARGET_SH4)
41002e70f6efSpbrook     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
41012e70f6efSpbrook             && n > 1) {
41022e70f6efSpbrook         env->pc -= 2;
41032e70f6efSpbrook         env->icount_decr.u16.low++;
41042e70f6efSpbrook         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
41052e70f6efSpbrook     }
41062e70f6efSpbrook #endif
41072e70f6efSpbrook     /* This should never happen.  */
41082e70f6efSpbrook     if (n > CF_COUNT_MASK)
41092e70f6efSpbrook         cpu_abort(env, "TB too big during recompile");
41102e70f6efSpbrook 
41112e70f6efSpbrook     cflags = n | CF_LAST_IO;
41122e70f6efSpbrook     pc = tb->pc;
41132e70f6efSpbrook     cs_base = tb->cs_base;
41142e70f6efSpbrook     flags = tb->flags;
41152e70f6efSpbrook     tb_phys_invalidate(tb, -1);
41162e70f6efSpbrook     /* FIXME: In theory this could raise an exception.  In practice
41172e70f6efSpbrook        we have already translated the block once so it's probably ok.  */
41182e70f6efSpbrook     tb_gen_code(env, pc, cs_base, flags, cflags);
4119bf20dc07Sths     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
41202e70f6efSpbrook        the first in the TB) then we end up generating a whole new TB and
41212e70f6efSpbrook        repeating the fault, which is horribly inefficient.
41222e70f6efSpbrook        Better would be to execute just this insn uncached, or generate a
41232e70f6efSpbrook        second new TB.  */
41242e70f6efSpbrook     cpu_resume_from_signal(env, NULL);
41252e70f6efSpbrook }
41262e70f6efSpbrook 
4127b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
4128b3755a91SPaul Brook 
4129055403b2SStefan Weil void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4130e3db7226Sbellard {
4131e3db7226Sbellard     int i, target_code_size, max_target_code_size;
4132e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
4133e3db7226Sbellard     TranslationBlock *tb;
4134e3db7226Sbellard 
4135e3db7226Sbellard     target_code_size = 0;
4136e3db7226Sbellard     max_target_code_size = 0;
4137e3db7226Sbellard     cross_page = 0;
4138e3db7226Sbellard     direct_jmp_count = 0;
4139e3db7226Sbellard     direct_jmp2_count = 0;
4140e3db7226Sbellard     for (i = 0; i < nb_tbs; i++) {
4141e3db7226Sbellard         tb = &tbs[i];
4142e3db7226Sbellard         target_code_size += tb->size;
4143e3db7226Sbellard         if (tb->size > max_target_code_size)
4144e3db7226Sbellard             max_target_code_size = tb->size;
4145e3db7226Sbellard         if (tb->page_addr[1] != -1)
4146e3db7226Sbellard             cross_page++;
4147e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
4148e3db7226Sbellard             direct_jmp_count++;
4149e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
4150e3db7226Sbellard                 direct_jmp2_count++;
4151e3db7226Sbellard             }
4152e3db7226Sbellard         }
4153e3db7226Sbellard     }
4154e3db7226Sbellard     /* XXX: avoid using doubles? */
415557fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
4156f1bc0bccSRichard Henderson     cpu_fprintf(f, "gen code size       %td/%zd\n",
415726a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
415826a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
415926a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
4160e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4161e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
4162e3db7226Sbellard                 max_target_code_size);
4163055403b2SStefan Weil     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
4164e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4165e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4166e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4167e3db7226Sbellard             cross_page,
4168e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4169e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4170e3db7226Sbellard                 direct_jmp_count,
4171e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4172e3db7226Sbellard                 direct_jmp2_count,
4173e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
417457fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
4175e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4176e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4177e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4178b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
4179e3db7226Sbellard }
4180e3db7226Sbellard 
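/*
 * dump_exec_info() backs the human monitor's "info jit" command; the
 * monitor passes itself as the stream.  A sketch of that calling
 * convention (monitor_fprintf is assumed to match fprintf_function):
 */
#if 0
static void example_info_jit(Monitor *mon)
{
    dump_exec_info((FILE *)mon, monitor_fprintf);
}
#endif
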
418182afa586SBenjamin Herrenschmidt /*
418282afa586SBenjamin Herrenschmidt  * A helper function for the _utterly broken_ virtio device model to find out if
418382afa586SBenjamin Herrenschmidt  * it's running on a big endian machine. Don't do this at home kids!
418482afa586SBenjamin Herrenschmidt  */
418582afa586SBenjamin Herrenschmidt bool virtio_is_big_endian(void);
418682afa586SBenjamin Herrenschmidt bool virtio_is_big_endian(void)
418782afa586SBenjamin Herrenschmidt {
418882afa586SBenjamin Herrenschmidt #if defined(TARGET_WORDS_BIGENDIAN)
418982afa586SBenjamin Herrenschmidt     return true;
419082afa586SBenjamin Herrenschmidt #else
419182afa586SBenjamin Herrenschmidt     return false;
419282afa586SBenjamin Herrenschmidt #endif
419382afa586SBenjamin Herrenschmidt }
419482afa586SBenjamin Herrenschmidt 
419561382a50Sbellard #endif
419676f35538SWen Congyang 
419776f35538SWen Congyang #ifndef CONFIG_USER_ONLY
419876f35538SWen Congyang bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
419976f35538SWen Congyang {
420076f35538SWen Congyang     MemoryRegionSection *section;
420176f35538SWen Congyang 
4202ac1970fbSAvi Kivity     section = phys_page_find(address_space_memory.dispatch,
4203ac1970fbSAvi Kivity                              phys_addr >> TARGET_PAGE_BITS);
420476f35538SWen Congyang 
420576f35538SWen Congyang     return !(memory_region_is_ram(section->mr) ||
420676f35538SWen Congyang              memory_region_is_romd(section->mr));
420776f35538SWen Congyang }
420876f35538SWen Congyang #endif
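
/*
 * cpu_physical_memory_is_io() lets callers such as a guest memory dumper
 * skip pages backed by device emulation rather than RAM or ROM.  An
 * illustrative check:
 */
#if 0
static bool example_page_is_dumpable(target_phys_addr_t paddr)
{
    return !cpu_physical_memory_is_io(paddr & TARGET_PAGE_MASK);
}
#endif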
4209