xref: /qemu/system/physmem.c (revision 405def18466d0cbd84e6a0edb598466b0a5e15c3)
154936004Sbellard /*
2fd6ce8f6Sbellard  *  virtual page mapping and translated block handling
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  * This library is free software; you can redistribute it and/or
754936004Sbellard  * modify it under the terms of the GNU Lesser General Public
854936004Sbellard  * License as published by the Free Software Foundation; either
954936004Sbellard  * version 2 of the License, or (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  * This library is distributed in the hope that it will be useful,
1254936004Sbellard  * but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1454936004Sbellard  * Lesser General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  * You should have received a copy of the GNU Lesser General Public
178167ee88SBlue Swirl  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
1967b915a5Sbellard #include "config.h"
20d5a8f07cSbellard #ifdef _WIN32
21d5a8f07cSbellard #include <windows.h>
22d5a8f07cSbellard #else
23a98d49b1Sbellard #include <sys/types.h>
24d5a8f07cSbellard #include <sys/mman.h>
25d5a8f07cSbellard #endif
2654936004Sbellard 
27055403b2SStefan Weil #include "qemu-common.h"
286180a181Sbellard #include "cpu.h"
29b67d9a52Sbellard #include "tcg.h"
30b3c7724cSpbrook #include "hw/hw.h"
31cc9e98cbSAlex Williamson #include "hw/qdev.h"
3274576198Saliguori #include "osdep.h"
337ba1e619Saliguori #include "kvm.h"
34432d268cSJun Nakajima #include "hw/xen.h"
3529e922b6SBlue Swirl #include "qemu-timer.h"
3662152b8aSAvi Kivity #include "memory.h"
3762152b8aSAvi Kivity #include "exec-memory.h"
3853a5960aSpbrook #if defined(CONFIG_USER_ONLY)
3953a5960aSpbrook #include <qemu.h>
40f01576f1SJuergen Lock #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41f01576f1SJuergen Lock #include <sys/param.h>
42f01576f1SJuergen Lock #if __FreeBSD_version >= 700104
43f01576f1SJuergen Lock #define HAVE_KINFO_GETVMMAP
44f01576f1SJuergen Lock #define sigqueue sigqueue_freebsd  /* avoid redefinition */
45f01576f1SJuergen Lock #include <sys/time.h>
46f01576f1SJuergen Lock #include <sys/proc.h>
47f01576f1SJuergen Lock #include <machine/profile.h>
48f01576f1SJuergen Lock #define _KERNEL
49f01576f1SJuergen Lock #include <sys/user.h>
50f01576f1SJuergen Lock #undef _KERNEL
51f01576f1SJuergen Lock #undef sigqueue
52f01576f1SJuergen Lock #include <libutil.h>
53f01576f1SJuergen Lock #endif
54f01576f1SJuergen Lock #endif
55432d268cSJun Nakajima #else /* !CONFIG_USER_ONLY */
56432d268cSJun Nakajima #include "xen-mapcache.h"
576506e4f9SStefano Stabellini #include "trace.h"
5853a5960aSpbrook #endif
5954936004Sbellard 
600cac1b66SBlue Swirl #include "cputlb.h"
610cac1b66SBlue Swirl 
6267d95c15SAvi Kivity #define WANT_EXEC_OBSOLETE
6367d95c15SAvi Kivity #include "exec-obsolete.h"
6467d95c15SAvi Kivity 
65fd6ce8f6Sbellard //#define DEBUG_TB_INVALIDATE
6666e85a21Sbellard //#define DEBUG_FLUSH
6767d3b957Spbrook //#define DEBUG_UNASSIGNED
68fd6ce8f6Sbellard 
69fd6ce8f6Sbellard /* make various TB consistency checks */
70fd6ce8f6Sbellard //#define DEBUG_TB_CHECK
71fd6ce8f6Sbellard 
721196be37Sths //#define DEBUG_IOPORT
73db7b5426Sblueswir1 //#define DEBUG_SUBPAGE
741196be37Sths 
7599773bd4Spbrook #if !defined(CONFIG_USER_ONLY)
7699773bd4Spbrook /* TB consistency checks only implemented for usermode emulation.  */
7799773bd4Spbrook #undef DEBUG_TB_CHECK
7899773bd4Spbrook #endif
7999773bd4Spbrook 
809fa3e853Sbellard #define SMC_BITMAP_USE_THRESHOLD 10
819fa3e853Sbellard 
82bdaf78e0Sblueswir1 static TranslationBlock *tbs;
8324ab68acSStefan Weil static int code_gen_max_blocks;
849fa3e853Sbellard TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85bdaf78e0Sblueswir1 static int nb_tbs;
86eb51d102Sbellard /* any access to the tbs or the page table must use this lock */
87c227f099SAnthony Liguori spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88fd6ce8f6Sbellard 
899b9c37c3SRichard Henderson #if defined(__arm__) || defined(__sparc__)
90141ac468Sblueswir1 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91141ac468Sblueswir1  have limited branch ranges (possibly also PPC) so place it in a
92d03d860bSblueswir1  section close to code segment. */
93d03d860bSblueswir1 #define code_gen_section                                \
94d03d860bSblueswir1     __attribute__((__section__(".gen_code")))           \
95d03d860bSblueswir1     __attribute__((aligned (32)))
966840981dSStefan Weil #elif defined(_WIN32) && !defined(_WIN64)
97f8e2af11SStefan Weil #define code_gen_section                                \
98f8e2af11SStefan Weil     __attribute__((aligned (16)))
99d03d860bSblueswir1 #else
100d03d860bSblueswir1 #define code_gen_section                                \
101d03d860bSblueswir1     __attribute__((aligned (32)))
102d03d860bSblueswir1 #endif
103d03d860bSblueswir1 
104d03d860bSblueswir1 uint8_t code_gen_prologue[1024] code_gen_section;
105bdaf78e0Sblueswir1 static uint8_t *code_gen_buffer;
106f1bc0bccSRichard Henderson static size_t code_gen_buffer_size;
10726a5f13bSbellard /* threshold to flush the translated code buffer */
108f1bc0bccSRichard Henderson static size_t code_gen_buffer_max_size;
10924ab68acSStefan Weil static uint8_t *code_gen_ptr;
110fd6ce8f6Sbellard 
111e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1129fa3e853Sbellard int phys_ram_fd;
11374576198Saliguori static int in_migration;
11494a6b54fSpbrook 
11585d59fefSPaolo Bonzini RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
11662152b8aSAvi Kivity 
11762152b8aSAvi Kivity static MemoryRegion *system_memory;
118309cb471SAvi Kivity static MemoryRegion *system_io;
11962152b8aSAvi Kivity 
1200e0df1e2SAvi Kivity MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
121de712f94SAvi Kivity static MemoryRegion io_mem_subpage_ram;
1220e0df1e2SAvi Kivity 
123e2eef170Spbrook #endif
1249fa3e853Sbellard 
1259349b4f9SAndreas Färber CPUArchState *first_cpu;
1266a00d601Sbellard /* current CPU in the current thread. It is only valid inside
1276a00d601Sbellard    cpu_exec() */
1289349b4f9SAndreas Färber DEFINE_TLS(CPUArchState *,cpu_single_env);
1292e70f6efSpbrook /* 0 = Do not count executed instructions.
130bf20dc07Sths    1 = Precise instruction counting.
1312e70f6efSpbrook    2 = Adaptive rate instruction counting.  */
1322e70f6efSpbrook int use_icount = 0;
1336a00d601Sbellard 
/* Per guest-page bookkeeping used by the translated-block machinery. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;   /* lazily built once code_write_count crosses the threshold */
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;    /* per-page PAGE_* flags (user-mode emulation only) */
#endif
} PageDesc;
14554936004Sbellard 
14641c1b1c9SPaul Brook /* In system mode we want L1_MAP to be based on ram offsets,
1475cd2c5b6SRichard Henderson    while in user mode we want it to be based on virtual addresses.  */
1485cd2c5b6SRichard Henderson #if !defined(CONFIG_USER_ONLY)
14941c1b1c9SPaul Brook #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
15041c1b1c9SPaul Brook # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
15141c1b1c9SPaul Brook #else
1525cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
15341c1b1c9SPaul Brook #endif
154bedb69eaSj_mayer #else
1555cd2c5b6SRichard Henderson # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
156bedb69eaSj_mayer #endif
15754936004Sbellard 
1585cd2c5b6SRichard Henderson /* Size of the L2 (and L3, etc) page tables.  */
1595cd2c5b6SRichard Henderson #define L2_BITS 10
16054936004Sbellard #define L2_SIZE (1 << L2_BITS)
16154936004Sbellard 
1623eef53dfSAvi Kivity #define P_L2_LEVELS \
1633eef53dfSAvi Kivity     (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
1643eef53dfSAvi Kivity 
1655cd2c5b6SRichard Henderson /* The bits remaining after N lower levels of page tables.  */
1665cd2c5b6SRichard Henderson #define V_L1_BITS_REM \
1675cd2c5b6SRichard Henderson     ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
1685cd2c5b6SRichard Henderson 
1695cd2c5b6SRichard Henderson #if V_L1_BITS_REM < 4
1705cd2c5b6SRichard Henderson #define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
1715cd2c5b6SRichard Henderson #else
1725cd2c5b6SRichard Henderson #define V_L1_BITS  V_L1_BITS_REM
1735cd2c5b6SRichard Henderson #endif
1745cd2c5b6SRichard Henderson 
1755cd2c5b6SRichard Henderson #define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
1765cd2c5b6SRichard Henderson 
1775cd2c5b6SRichard Henderson #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
1785cd2c5b6SRichard Henderson 
179c6d50674SStefan Weil uintptr_t qemu_real_host_page_size;
180c6d50674SStefan Weil uintptr_t qemu_host_page_size;
181c6d50674SStefan Weil uintptr_t qemu_host_page_mask;
18254936004Sbellard 
1835cd2c5b6SRichard Henderson /* This is a multi-level map on the virtual address space.
1845cd2c5b6SRichard Henderson    The bottom level has pointers to PageDesc.  */
1855cd2c5b6SRichard Henderson static void *l1_map[V_L1_SIZE];
18654936004Sbellard 
187e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
1884346ae3eSAvi Kivity typedef struct PhysPageEntry PhysPageEntry;
1894346ae3eSAvi Kivity 
1905312bd8bSAvi Kivity static MemoryRegionSection *phys_sections;
1915312bd8bSAvi Kivity static unsigned phys_sections_nb, phys_sections_nb_alloc;
1925312bd8bSAvi Kivity static uint16_t phys_section_unassigned;
193aa102231SAvi Kivity static uint16_t phys_section_notdirty;
194aa102231SAvi Kivity static uint16_t phys_section_rom;
195aa102231SAvi Kivity static uint16_t phys_section_watch;
1965312bd8bSAvi Kivity 
/* One entry in the physical-address radix tree: a tagged 16-bit union of
   either a leaf (section index) or an interior node pointer. */
struct PhysPageEntry {
    uint16_t is_leaf : 1;
     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
2024346ae3eSAvi Kivity 
203d6f2ea22SAvi Kivity /* Simple allocator for PhysPageEntry nodes */
204d6f2ea22SAvi Kivity static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
205d6f2ea22SAvi Kivity static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
206d6f2ea22SAvi Kivity 
20707f07b31SAvi Kivity #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
208d6f2ea22SAvi Kivity 
2095cd2c5b6SRichard Henderson /* This is a multi-level map on the physical address space.
21006ef3525SAvi Kivity    The bottom level has pointers to MemoryRegionSections.  */
21107f07b31SAvi Kivity static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
2126d9a1304SPaul Brook 
213e2eef170Spbrook static void io_mem_init(void);
21462152b8aSAvi Kivity static void memory_map_init(void);
215e2eef170Spbrook 
2161ec9b909SAvi Kivity static MemoryRegion io_mem_watch;
2176658ffb8Spbrook #endif
21833417e70Sbellard 
219e3db7226Sbellard /* statistics */
220e3db7226Sbellard static int tb_flush_count;
221e3db7226Sbellard static int tb_phys_invalidate_count;
222e3db7226Sbellard 
#ifdef _WIN32
/* Make the host memory range [addr, addr + size) executable
   (Win32 variant, via VirtualProtect). */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* Make the host memory range [addr, addr + size) executable
   (POSIX variant; mprotect operates on whole pages, so the range is
   widened to page boundaries first). */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);          /* round start down to a page boundary */

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);            /* round end up to a page boundary */

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
2487cb69caeSbellard 
/* One-time initialization of host/guest page-size globals, and (for BSD
   user-mode emulation) pre-reservation of address ranges already mapped
   in the host process so the guest cannot be placed on top of them. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: enumerate existing host mappings via
           kinfo_getvmmap() and mark them PAGE_RESERVED. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* End lies outside the guest address space; only
                           reserve to the top if the guest space is not
                           wider than the map's address space. */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file for existing
           host mappings. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
33454936004Sbellard 
/* Walk the multi-level l1_map and return the PageDesc for guest page
   'index'.  If 'alloc' is non-zero, missing intermediate tables and the
   bottom-level PageDesc array are allocated on the way down; otherwise
   NULL is returned as soon as a level is absent. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: a flat array of L2_SIZE PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
38454936004Sbellard 
/* Non-allocating lookup: return the PageDesc for 'index', or NULL if
   no descriptor has been created for that page yet. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
38954936004Sbellard 
3906d9a1304SPaul Brook #if !defined(CONFIG_USER_ONLY)
391d6f2ea22SAvi Kivity 
392f7bf5461SAvi Kivity static void phys_map_node_reserve(unsigned nodes)
393f7bf5461SAvi Kivity {
394f7bf5461SAvi Kivity     if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
395f7bf5461SAvi Kivity         typedef PhysPageEntry Node[L2_SIZE];
396f7bf5461SAvi Kivity         phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
397f7bf5461SAvi Kivity         phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
398f7bf5461SAvi Kivity                                       phys_map_nodes_nb + nodes);
399f7bf5461SAvi Kivity         phys_map_nodes = g_renew(Node, phys_map_nodes,
400f7bf5461SAvi Kivity                                  phys_map_nodes_nb_alloc);
401f7bf5461SAvi Kivity     }
402f7bf5461SAvi Kivity }
403f7bf5461SAvi Kivity 
404f7bf5461SAvi Kivity static uint16_t phys_map_node_alloc(void)
405d6f2ea22SAvi Kivity {
406d6f2ea22SAvi Kivity     unsigned i;
407d6f2ea22SAvi Kivity     uint16_t ret;
408d6f2ea22SAvi Kivity 
409f7bf5461SAvi Kivity     ret = phys_map_nodes_nb++;
410d6f2ea22SAvi Kivity     assert(ret != PHYS_MAP_NODE_NIL);
411f7bf5461SAvi Kivity     assert(ret != phys_map_nodes_nb_alloc);
412d6f2ea22SAvi Kivity     for (i = 0; i < L2_SIZE; ++i) {
41307f07b31SAvi Kivity         phys_map_nodes[ret][i].is_leaf = 0;
414c19e8800SAvi Kivity         phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
415d6f2ea22SAvi Kivity     }
416f7bf5461SAvi Kivity     return ret;
417d6f2ea22SAvi Kivity }
418d6f2ea22SAvi Kivity 
/* Discard every node in the pool; the radix tree is rebuilt from
   scratch afterwards.  The backing allocation is kept for reuse. */
static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
423d6f2ea22SAvi Kivity 
424f7bf5461SAvi Kivity 
/* Recursive worker for phys_page_set(): at tree depth 'level', populate
   the node reached through *lp so that the range [*index, *index + *nb)
   maps to section 'leaf'.  '*index' and '*nb' are advanced as entries
   are consumed.  Spans that are aligned to and at least as large as this
   level's step are recorded directly; anything smaller recurses down. */
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        /* First visit: allocate the child node.  At the bottom level,
           pre-fill every entry as an "unassigned" leaf. */
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* This entry's whole subtree is covered: store the leaf here. */
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
4595cd2c5b6SRichard Henderson 
/* Record that the physical page range [index, index + nb) belongs to
   section number 'leaf' by updating the phys_map radix tree. */
static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
46892e873b9Sbellard 
4690cac1b66SBlue Swirl MemoryRegionSection *phys_page_find(target_phys_addr_t index)
47092e873b9Sbellard {
47131ab2b4aSAvi Kivity     PhysPageEntry lp = phys_map;
47231ab2b4aSAvi Kivity     PhysPageEntry *p;
47331ab2b4aSAvi Kivity     int i;
47431ab2b4aSAvi Kivity     uint16_t s_index = phys_section_unassigned;
475f1f6e3b8SAvi Kivity 
47607f07b31SAvi Kivity     for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
477c19e8800SAvi Kivity         if (lp.ptr == PHYS_MAP_NODE_NIL) {
47831ab2b4aSAvi Kivity             goto not_found;
479f1f6e3b8SAvi Kivity         }
480c19e8800SAvi Kivity         p = phys_map_nodes[lp.ptr];
48131ab2b4aSAvi Kivity         lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
48231ab2b4aSAvi Kivity     }
48331ab2b4aSAvi Kivity 
484c19e8800SAvi Kivity     s_index = lp.ptr;
48531ab2b4aSAvi Kivity not_found:
486f3705d53SAvi Kivity     return &phys_sections[s_index];
487f3705d53SAvi Kivity }
488f3705d53SAvi Kivity 
489e5548617SBlue Swirl bool memory_region_is_unassigned(MemoryRegion *mr)
490e5548617SBlue Swirl {
491e5548617SBlue Swirl     return mr != &io_mem_ram && mr != &io_mem_rom
492e5548617SBlue Swirl         && mr != &io_mem_notdirty && !mr->rom_device
493e5548617SBlue Swirl         && mr != &io_mem_watch;
494e5548617SBlue Swirl }
495e5548617SBlue Swirl 
496c8a706feSpbrook #define mmap_lock() do { } while(0)
497c8a706feSpbrook #define mmap_unlock() do { } while(0)
4989fa3e853Sbellard #endif
499fd6ce8f6Sbellard 
5004369415fSbellard #if defined(CONFIG_USER_ONLY)
501ccbb4d44SStuart Brady /* Currently it is not recommended to allocate big chunks of data in
502f1bc0bccSRichard Henderson    user mode. It will change when a dedicated libc will be used.  */
503f1bc0bccSRichard Henderson /* ??? 64-bit hosts ought to have no problem mmaping data outside the
504f1bc0bccSRichard Henderson    region in which the guest needs to run.  Revisit this.  */
5054369415fSbellard #define USE_STATIC_CODE_GEN_BUFFER
5064369415fSbellard #endif
5074369415fSbellard 
508f1bc0bccSRichard Henderson /* ??? Should configure for this, not list operating systems here.  */
509f1bc0bccSRichard Henderson #if (defined(__linux__) \
510f1bc0bccSRichard Henderson     || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
511f1bc0bccSRichard Henderson     || defined(__DragonFly__) || defined(__OpenBSD__) \
512f1bc0bccSRichard Henderson     || defined(__NetBSD__))
513f1bc0bccSRichard Henderson # define USE_MMAP
514f1bc0bccSRichard Henderson #endif
515f1bc0bccSRichard Henderson 
516f1bc0bccSRichard Henderson /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
517f1bc0bccSRichard Henderson    indicated, this is constrained by the range of direct branches on the
518f1bc0bccSRichard Henderson    host cpu, as used by the TCG implementation of goto_tb.  */
519f1bc0bccSRichard Henderson #if defined(__x86_64__)
520f1bc0bccSRichard Henderson # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
521f1bc0bccSRichard Henderson #elif defined(__sparc__)
522f1bc0bccSRichard Henderson # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
523f1bc0bccSRichard Henderson #elif defined(__arm__)
524f1bc0bccSRichard Henderson # define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
525f1bc0bccSRichard Henderson #elif defined(__s390x__)
526f1bc0bccSRichard Henderson   /* We have a +- 4GB range on the branches; leave some slop.  */
527f1bc0bccSRichard Henderson # define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
528f1bc0bccSRichard Henderson #else
529f1bc0bccSRichard Henderson # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
530f1bc0bccSRichard Henderson #endif
531f1bc0bccSRichard Henderson 
5323d85a72fSRichard Henderson #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
5333d85a72fSRichard Henderson 
5343d85a72fSRichard Henderson #define DEFAULT_CODE_GEN_BUFFER_SIZE \
5353d85a72fSRichard Henderson   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
5363d85a72fSRichard Henderson    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
537f1bc0bccSRichard Henderson 
538f1bc0bccSRichard Henderson static inline size_t size_code_gen_buffer(size_t tb_size)
539f1bc0bccSRichard Henderson {
540f1bc0bccSRichard Henderson     /* Size the buffer.  */
541f1bc0bccSRichard Henderson     if (tb_size == 0) {
542f1bc0bccSRichard Henderson #ifdef USE_STATIC_CODE_GEN_BUFFER
543f1bc0bccSRichard Henderson         tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
544f1bc0bccSRichard Henderson #else
545f1bc0bccSRichard Henderson         /* ??? Needs adjustments.  */
546f1bc0bccSRichard Henderson         /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
547f1bc0bccSRichard Henderson            static buffer, we could size this on RESERVED_VA, on the text
548f1bc0bccSRichard Henderson            segment size of the executable, or continue to use the default.  */
549f1bc0bccSRichard Henderson         tb_size = (unsigned long)(ram_size / 4);
550f1bc0bccSRichard Henderson #endif
551f1bc0bccSRichard Henderson     }
552f1bc0bccSRichard Henderson     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
553f1bc0bccSRichard Henderson         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
554f1bc0bccSRichard Henderson     }
555f1bc0bccSRichard Henderson     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
556f1bc0bccSRichard Henderson         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
557f1bc0bccSRichard Henderson     }
558f1bc0bccSRichard Henderson     code_gen_buffer_size = tb_size;
559f1bc0bccSRichard Henderson     return tb_size;
560f1bc0bccSRichard Henderson }
561f1bc0bccSRichard Henderson 
#ifdef USE_STATIC_CODE_GEN_BUFFER
/* User-mode emulation: the code buffer lives in the BSS. */
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

/* Return the buffer used to hold translated code: here the static
   array, made executable in place. */
static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
/* mmap an executable buffer of code_gen_buffer_size bytes, positioned
   (where possible) so the host's direct branches can reach the rest of
   the executable.  Returns NULL on failure. */
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
/* Fallback: heap-allocate the buffer and mark it executable.
   NOTE(review): g_malloc normally aborts rather than returning NULL,
   so the NULL check here is presumably defensive — confirm against the
   glib version in use. */
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
614f1bc0bccSRichard Henderson 
615f1bc0bccSRichard Henderson static inline void code_gen_alloc(size_t tb_size)
616f1bc0bccSRichard Henderson {
617f1bc0bccSRichard Henderson     code_gen_buffer_size = size_code_gen_buffer(tb_size);
618f1bc0bccSRichard Henderson     code_gen_buffer = alloc_code_gen_buffer();
619f1bc0bccSRichard Henderson     if (code_gen_buffer == NULL) {
620f1bc0bccSRichard Henderson         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
621f1bc0bccSRichard Henderson         exit(1);
622f1bc0bccSRichard Henderson     }
623f1bc0bccSRichard Henderson 
62426a5f13bSbellard     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
62526a5f13bSbellard     code_gen_buffer_max_size = code_gen_buffer_size -
626a884da8aSPeter Maydell         (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
62726a5f13bSbellard     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
6287267c094SAnthony Liguori     tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
62926a5f13bSbellard }
63026a5f13bSbellard 
63126a5f13bSbellard /* Must be called before using the QEMU cpus. 'tb_size' is the size
63226a5f13bSbellard    (in bytes) allocated to the translation buffer. Zero means default
63326a5f13bSbellard    size. */
634d5ab9713SJan Kiszka void tcg_exec_init(unsigned long tb_size)
63526a5f13bSbellard {
63626a5f13bSbellard     cpu_gen_init();
63726a5f13bSbellard     code_gen_alloc(tb_size);
63826a5f13bSbellard     code_gen_ptr = code_gen_buffer;
639813da627SRichard Henderson     tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
6404369415fSbellard     page_init();
6419002ec79SRichard Henderson #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
6429002ec79SRichard Henderson     /* There's no guest base to take into account, so go ahead and
6439002ec79SRichard Henderson        initialize the prologue now.  */
6449002ec79SRichard Henderson     tcg_prologue_init(&tcg_ctx);
6459002ec79SRichard Henderson #endif
64626a5f13bSbellard }
64726a5f13bSbellard 
648d5ab9713SJan Kiszka bool tcg_enabled(void)
649d5ab9713SJan Kiszka {
650d5ab9713SJan Kiszka     return code_gen_buffer != NULL;
651d5ab9713SJan Kiszka }
652d5ab9713SJan Kiszka 
653d5ab9713SJan Kiszka void cpu_exec_init_all(void)
654d5ab9713SJan Kiszka {
655d5ab9713SJan Kiszka #if !defined(CONFIG_USER_ONLY)
656d5ab9713SJan Kiszka     memory_map_init();
657d5ab9713SJan Kiszka     io_mem_init();
658d5ab9713SJan Kiszka #endif
659d5ab9713SJan Kiszka }
660d5ab9713SJan Kiszka 
6619656f324Spbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
6629656f324Spbrook 
663e59fb374SJuan Quintela static int cpu_common_post_load(void *opaque, int version_id)
664e7f4eff7SJuan Quintela {
6659349b4f9SAndreas Färber     CPUArchState *env = opaque;
666e7f4eff7SJuan Quintela 
6673098dba0Saurel32     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
6683098dba0Saurel32        version_id is increased. */
6693098dba0Saurel32     env->interrupt_request &= ~0x01;
6709656f324Spbrook     tlb_flush(env, 1);
6719656f324Spbrook 
6729656f324Spbrook     return 0;
6739656f324Spbrook }
674e7f4eff7SJuan Quintela 
675e7f4eff7SJuan Quintela static const VMStateDescription vmstate_cpu_common = {
676e7f4eff7SJuan Quintela     .name = "cpu_common",
677e7f4eff7SJuan Quintela     .version_id = 1,
678e7f4eff7SJuan Quintela     .minimum_version_id = 1,
679e7f4eff7SJuan Quintela     .minimum_version_id_old = 1,
680e7f4eff7SJuan Quintela     .post_load = cpu_common_post_load,
681e7f4eff7SJuan Quintela     .fields      = (VMStateField []) {
6829349b4f9SAndreas Färber         VMSTATE_UINT32(halted, CPUArchState),
6839349b4f9SAndreas Färber         VMSTATE_UINT32(interrupt_request, CPUArchState),
684e7f4eff7SJuan Quintela         VMSTATE_END_OF_LIST()
685e7f4eff7SJuan Quintela     }
686e7f4eff7SJuan Quintela };
6879656f324Spbrook #endif
6889656f324Spbrook 
6899349b4f9SAndreas Färber CPUArchState *qemu_get_cpu(int cpu)
690950f1472SGlauber Costa {
6919349b4f9SAndreas Färber     CPUArchState *env = first_cpu;
692950f1472SGlauber Costa 
693950f1472SGlauber Costa     while (env) {
694950f1472SGlauber Costa         if (env->cpu_index == cpu)
695950f1472SGlauber Costa             break;
696950f1472SGlauber Costa         env = env->next_cpu;
697950f1472SGlauber Costa     }
698950f1472SGlauber Costa 
699950f1472SGlauber Costa     return env;
700950f1472SGlauber Costa }
701950f1472SGlauber Costa 
7029349b4f9SAndreas Färber void cpu_exec_init(CPUArchState *env)
703fd6ce8f6Sbellard {
7049349b4f9SAndreas Färber     CPUArchState **penv;
7056a00d601Sbellard     int cpu_index;
7066a00d601Sbellard 
707c2764719Spbrook #if defined(CONFIG_USER_ONLY)
708c2764719Spbrook     cpu_list_lock();
709c2764719Spbrook #endif
7106a00d601Sbellard     env->next_cpu = NULL;
7116a00d601Sbellard     penv = &first_cpu;
7126a00d601Sbellard     cpu_index = 0;
7136a00d601Sbellard     while (*penv != NULL) {
7141e9fa730SNathan Froyd         penv = &(*penv)->next_cpu;
7156a00d601Sbellard         cpu_index++;
7166a00d601Sbellard     }
7176a00d601Sbellard     env->cpu_index = cpu_index;
718268a362cSaliguori     env->numa_node = 0;
71972cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
72072cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
721dc7a09cfSJan Kiszka #ifndef CONFIG_USER_ONLY
722dc7a09cfSJan Kiszka     env->thread_id = qemu_get_thread_id();
723dc7a09cfSJan Kiszka #endif
7246a00d601Sbellard     *penv = env;
725c2764719Spbrook #if defined(CONFIG_USER_ONLY)
726c2764719Spbrook     cpu_list_unlock();
727c2764719Spbrook #endif
728b3c7724cSpbrook #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
7290be71e32SAlex Williamson     vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
7300be71e32SAlex Williamson     register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
731b3c7724cSpbrook                     cpu_save, cpu_load, env);
732b3c7724cSpbrook #endif
733fd6ce8f6Sbellard }
734fd6ce8f6Sbellard 
735d1a1eb74STristan Gingold /* Allocate a new translation block. Flush the translation buffer if
736d1a1eb74STristan Gingold    too many translation blocks or too much generated code. */
737d1a1eb74STristan Gingold static TranslationBlock *tb_alloc(target_ulong pc)
738d1a1eb74STristan Gingold {
739d1a1eb74STristan Gingold     TranslationBlock *tb;
740d1a1eb74STristan Gingold 
741d1a1eb74STristan Gingold     if (nb_tbs >= code_gen_max_blocks ||
742d1a1eb74STristan Gingold         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
743d1a1eb74STristan Gingold         return NULL;
744d1a1eb74STristan Gingold     tb = &tbs[nb_tbs++];
745d1a1eb74STristan Gingold     tb->pc = pc;
746d1a1eb74STristan Gingold     tb->cflags = 0;
747d1a1eb74STristan Gingold     return tb;
748d1a1eb74STristan Gingold }
749d1a1eb74STristan Gingold 
750d1a1eb74STristan Gingold void tb_free(TranslationBlock *tb)
751d1a1eb74STristan Gingold {
752d1a1eb74STristan Gingold     /* In practice this is mostly used for single use temporary TB
753d1a1eb74STristan Gingold        Ignore the hard cases and just back up if this TB happens to
754d1a1eb74STristan Gingold        be the last one generated.  */
755d1a1eb74STristan Gingold     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
756d1a1eb74STristan Gingold         code_gen_ptr = tb->tc_ptr;
757d1a1eb74STristan Gingold         nb_tbs--;
758d1a1eb74STristan Gingold     }
759d1a1eb74STristan Gingold }
760d1a1eb74STristan Gingold 
7619fa3e853Sbellard static inline void invalidate_page_bitmap(PageDesc *p)
7629fa3e853Sbellard {
7639fa3e853Sbellard     if (p->code_bitmap) {
7647267c094SAnthony Liguori         g_free(p->code_bitmap);
7659fa3e853Sbellard         p->code_bitmap = NULL;
7669fa3e853Sbellard     }
7679fa3e853Sbellard     p->code_write_count = 0;
7689fa3e853Sbellard }
7699fa3e853Sbellard 
7705cd2c5b6SRichard Henderson /* Set to NULL all the 'first_tb' fields in all PageDescs. */
7715cd2c5b6SRichard Henderson 
7725cd2c5b6SRichard Henderson static void page_flush_tb_1 (int level, void **lp)
7735cd2c5b6SRichard Henderson {
7745cd2c5b6SRichard Henderson     int i;
7755cd2c5b6SRichard Henderson 
7765cd2c5b6SRichard Henderson     if (*lp == NULL) {
7775cd2c5b6SRichard Henderson         return;
7785cd2c5b6SRichard Henderson     }
7795cd2c5b6SRichard Henderson     if (level == 0) {
7805cd2c5b6SRichard Henderson         PageDesc *pd = *lp;
7817296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
7825cd2c5b6SRichard Henderson             pd[i].first_tb = NULL;
7835cd2c5b6SRichard Henderson             invalidate_page_bitmap(pd + i);
7845cd2c5b6SRichard Henderson         }
7855cd2c5b6SRichard Henderson     } else {
7865cd2c5b6SRichard Henderson         void **pp = *lp;
7877296abacSPaul Brook         for (i = 0; i < L2_SIZE; ++i) {
7885cd2c5b6SRichard Henderson             page_flush_tb_1 (level - 1, pp + i);
7895cd2c5b6SRichard Henderson         }
7905cd2c5b6SRichard Henderson     }
7915cd2c5b6SRichard Henderson }
7925cd2c5b6SRichard Henderson 
793fd6ce8f6Sbellard static void page_flush_tb(void)
794fd6ce8f6Sbellard {
7955cd2c5b6SRichard Henderson     int i;
7965cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
7975cd2c5b6SRichard Henderson         page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
798fd6ce8f6Sbellard     }
799fd6ce8f6Sbellard }
800fd6ce8f6Sbellard 
801fd6ce8f6Sbellard /* flush all the translation blocks */
802d4e8164fSbellard /* XXX: tb_flush is currently not thread safe */
8039349b4f9SAndreas Färber void tb_flush(CPUArchState *env1)
804fd6ce8f6Sbellard {
8059349b4f9SAndreas Färber     CPUArchState *env;
8060124311eSbellard #if defined(DEBUG_FLUSH)
807ab3d1727Sblueswir1     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
808ab3d1727Sblueswir1            (unsigned long)(code_gen_ptr - code_gen_buffer),
809ab3d1727Sblueswir1            nb_tbs, nb_tbs > 0 ?
810ab3d1727Sblueswir1            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
811fd6ce8f6Sbellard #endif
81226a5f13bSbellard     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
813a208e54aSpbrook         cpu_abort(env1, "Internal error: code buffer overflow\n");
814a208e54aSpbrook 
815fd6ce8f6Sbellard     nb_tbs = 0;
8166a00d601Sbellard 
8176a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
8188a40a180Sbellard         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
8196a00d601Sbellard     }
8209fa3e853Sbellard 
8218a8a608fSbellard     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
822fd6ce8f6Sbellard     page_flush_tb();
8239fa3e853Sbellard 
824fd6ce8f6Sbellard     code_gen_ptr = code_gen_buffer;
825d4e8164fSbellard     /* XXX: flush processor icache at this point if cache flush is
826d4e8164fSbellard        expensive */
827e3db7226Sbellard     tb_flush_count++;
828fd6ce8f6Sbellard }
829fd6ce8f6Sbellard 
830fd6ce8f6Sbellard #ifdef DEBUG_TB_CHECK
831fd6ce8f6Sbellard 
832bc98a7efSj_mayer static void tb_invalidate_check(target_ulong address)
833fd6ce8f6Sbellard {
834fd6ce8f6Sbellard     TranslationBlock *tb;
835fd6ce8f6Sbellard     int i;
836fd6ce8f6Sbellard     address &= TARGET_PAGE_MASK;
83799773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
83899773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
839fd6ce8f6Sbellard             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
840fd6ce8f6Sbellard                   address >= tb->pc + tb->size)) {
8410bf9e31aSBlue Swirl                 printf("ERROR invalidate: address=" TARGET_FMT_lx
8420bf9e31aSBlue Swirl                        " PC=%08lx size=%04x\n",
84399773bd4Spbrook                        address, (long)tb->pc, tb->size);
844fd6ce8f6Sbellard             }
845fd6ce8f6Sbellard         }
846fd6ce8f6Sbellard     }
847fd6ce8f6Sbellard }
848fd6ce8f6Sbellard 
849fd6ce8f6Sbellard /* verify that all the pages have correct rights for code */
850fd6ce8f6Sbellard static void tb_page_check(void)
851fd6ce8f6Sbellard {
852fd6ce8f6Sbellard     TranslationBlock *tb;
853fd6ce8f6Sbellard     int i, flags1, flags2;
854fd6ce8f6Sbellard 
85599773bd4Spbrook     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
85699773bd4Spbrook         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
857fd6ce8f6Sbellard             flags1 = page_get_flags(tb->pc);
858fd6ce8f6Sbellard             flags2 = page_get_flags(tb->pc + tb->size - 1);
859fd6ce8f6Sbellard             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
860fd6ce8f6Sbellard                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
86199773bd4Spbrook                        (long)tb->pc, tb->size, flags1, flags2);
862fd6ce8f6Sbellard             }
863fd6ce8f6Sbellard         }
864fd6ce8f6Sbellard     }
865fd6ce8f6Sbellard }
866fd6ce8f6Sbellard 
867fd6ce8f6Sbellard #endif
868fd6ce8f6Sbellard 
869fd6ce8f6Sbellard /* invalidate one TB */
870fd6ce8f6Sbellard static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
871fd6ce8f6Sbellard                              int next_offset)
872fd6ce8f6Sbellard {
873fd6ce8f6Sbellard     TranslationBlock *tb1;
874fd6ce8f6Sbellard     for(;;) {
875fd6ce8f6Sbellard         tb1 = *ptb;
876fd6ce8f6Sbellard         if (tb1 == tb) {
877fd6ce8f6Sbellard             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
878fd6ce8f6Sbellard             break;
879fd6ce8f6Sbellard         }
880fd6ce8f6Sbellard         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
881fd6ce8f6Sbellard     }
882fd6ce8f6Sbellard }
883fd6ce8f6Sbellard 
8849fa3e853Sbellard static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
8859fa3e853Sbellard {
8869fa3e853Sbellard     TranslationBlock *tb1;
8879fa3e853Sbellard     unsigned int n1;
8889fa3e853Sbellard 
8899fa3e853Sbellard     for(;;) {
8909fa3e853Sbellard         tb1 = *ptb;
8918efe0ca8SStefan Weil         n1 = (uintptr_t)tb1 & 3;
8928efe0ca8SStefan Weil         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
8939fa3e853Sbellard         if (tb1 == tb) {
8949fa3e853Sbellard             *ptb = tb1->page_next[n1];
8959fa3e853Sbellard             break;
8969fa3e853Sbellard         }
8979fa3e853Sbellard         ptb = &tb1->page_next[n1];
8989fa3e853Sbellard     }
8999fa3e853Sbellard }
9009fa3e853Sbellard 
901d4e8164fSbellard static inline void tb_jmp_remove(TranslationBlock *tb, int n)
902d4e8164fSbellard {
903d4e8164fSbellard     TranslationBlock *tb1, **ptb;
904d4e8164fSbellard     unsigned int n1;
905d4e8164fSbellard 
906d4e8164fSbellard     ptb = &tb->jmp_next[n];
907d4e8164fSbellard     tb1 = *ptb;
908d4e8164fSbellard     if (tb1) {
909d4e8164fSbellard         /* find tb(n) in circular list */
910d4e8164fSbellard         for(;;) {
911d4e8164fSbellard             tb1 = *ptb;
9128efe0ca8SStefan Weil             n1 = (uintptr_t)tb1 & 3;
9138efe0ca8SStefan Weil             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
914d4e8164fSbellard             if (n1 == n && tb1 == tb)
915d4e8164fSbellard                 break;
916d4e8164fSbellard             if (n1 == 2) {
917d4e8164fSbellard                 ptb = &tb1->jmp_first;
918d4e8164fSbellard             } else {
919d4e8164fSbellard                 ptb = &tb1->jmp_next[n1];
920d4e8164fSbellard             }
921d4e8164fSbellard         }
922d4e8164fSbellard         /* now we can suppress tb(n) from the list */
923d4e8164fSbellard         *ptb = tb->jmp_next[n];
924d4e8164fSbellard 
925d4e8164fSbellard         tb->jmp_next[n] = NULL;
926d4e8164fSbellard     }
927d4e8164fSbellard }
928d4e8164fSbellard 
929d4e8164fSbellard /* reset the jump entry 'n' of a TB so that it is not chained to
930d4e8164fSbellard    another TB */
931d4e8164fSbellard static inline void tb_reset_jump(TranslationBlock *tb, int n)
932d4e8164fSbellard {
9338efe0ca8SStefan Weil     tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
934d4e8164fSbellard }
935d4e8164fSbellard 
93641c1b1c9SPaul Brook void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
937fd6ce8f6Sbellard {
9389349b4f9SAndreas Färber     CPUArchState *env;
939fd6ce8f6Sbellard     PageDesc *p;
9408a40a180Sbellard     unsigned int h, n1;
94141c1b1c9SPaul Brook     tb_page_addr_t phys_pc;
9428a40a180Sbellard     TranslationBlock *tb1, *tb2;
943fd6ce8f6Sbellard 
9449fa3e853Sbellard     /* remove the TB from the hash list */
9459fa3e853Sbellard     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
9469fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
9479fa3e853Sbellard     tb_remove(&tb_phys_hash[h], tb,
9489fa3e853Sbellard               offsetof(TranslationBlock, phys_hash_next));
9499fa3e853Sbellard 
9509fa3e853Sbellard     /* remove the TB from the page list */
9519fa3e853Sbellard     if (tb->page_addr[0] != page_addr) {
9529fa3e853Sbellard         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
9539fa3e853Sbellard         tb_page_remove(&p->first_tb, tb);
9549fa3e853Sbellard         invalidate_page_bitmap(p);
9559fa3e853Sbellard     }
9569fa3e853Sbellard     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
9579fa3e853Sbellard         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
9589fa3e853Sbellard         tb_page_remove(&p->first_tb, tb);
9599fa3e853Sbellard         invalidate_page_bitmap(p);
9609fa3e853Sbellard     }
9619fa3e853Sbellard 
9628a40a180Sbellard     tb_invalidated_flag = 1;
9638a40a180Sbellard 
9648a40a180Sbellard     /* remove the TB from the hash list */
9658a40a180Sbellard     h = tb_jmp_cache_hash_func(tb->pc);
9666a00d601Sbellard     for(env = first_cpu; env != NULL; env = env->next_cpu) {
9676a00d601Sbellard         if (env->tb_jmp_cache[h] == tb)
9686a00d601Sbellard             env->tb_jmp_cache[h] = NULL;
9696a00d601Sbellard     }
9708a40a180Sbellard 
9718a40a180Sbellard     /* suppress this TB from the two jump lists */
9728a40a180Sbellard     tb_jmp_remove(tb, 0);
9738a40a180Sbellard     tb_jmp_remove(tb, 1);
9748a40a180Sbellard 
9758a40a180Sbellard     /* suppress any remaining jumps to this TB */
9768a40a180Sbellard     tb1 = tb->jmp_first;
9778a40a180Sbellard     for(;;) {
9788efe0ca8SStefan Weil         n1 = (uintptr_t)tb1 & 3;
9798a40a180Sbellard         if (n1 == 2)
9808a40a180Sbellard             break;
9818efe0ca8SStefan Weil         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
9828a40a180Sbellard         tb2 = tb1->jmp_next[n1];
9838a40a180Sbellard         tb_reset_jump(tb1, n1);
9848a40a180Sbellard         tb1->jmp_next[n1] = NULL;
9858a40a180Sbellard         tb1 = tb2;
9868a40a180Sbellard     }
9878efe0ca8SStefan Weil     tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
9888a40a180Sbellard 
989e3db7226Sbellard     tb_phys_invalidate_count++;
9909fa3e853Sbellard }
9919fa3e853Sbellard 
9929fa3e853Sbellard static inline void set_bits(uint8_t *tab, int start, int len)
9939fa3e853Sbellard {
9949fa3e853Sbellard     int end, mask, end1;
9959fa3e853Sbellard 
9969fa3e853Sbellard     end = start + len;
9979fa3e853Sbellard     tab += start >> 3;
9989fa3e853Sbellard     mask = 0xff << (start & 7);
9999fa3e853Sbellard     if ((start & ~7) == (end & ~7)) {
10009fa3e853Sbellard         if (start < end) {
10019fa3e853Sbellard             mask &= ~(0xff << (end & 7));
10029fa3e853Sbellard             *tab |= mask;
10039fa3e853Sbellard         }
10049fa3e853Sbellard     } else {
10059fa3e853Sbellard         *tab++ |= mask;
10069fa3e853Sbellard         start = (start + 8) & ~7;
10079fa3e853Sbellard         end1 = end & ~7;
10089fa3e853Sbellard         while (start < end1) {
10099fa3e853Sbellard             *tab++ = 0xff;
10109fa3e853Sbellard             start += 8;
10119fa3e853Sbellard         }
10129fa3e853Sbellard         if (start < end) {
10139fa3e853Sbellard             mask = ~(0xff << (end & 7));
10149fa3e853Sbellard             *tab |= mask;
10159fa3e853Sbellard         }
10169fa3e853Sbellard     }
10179fa3e853Sbellard }
10189fa3e853Sbellard 
10199fa3e853Sbellard static void build_page_bitmap(PageDesc *p)
10209fa3e853Sbellard {
10219fa3e853Sbellard     int n, tb_start, tb_end;
10229fa3e853Sbellard     TranslationBlock *tb;
10239fa3e853Sbellard 
10247267c094SAnthony Liguori     p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
10259fa3e853Sbellard 
10269fa3e853Sbellard     tb = p->first_tb;
10279fa3e853Sbellard     while (tb != NULL) {
10288efe0ca8SStefan Weil         n = (uintptr_t)tb & 3;
10298efe0ca8SStefan Weil         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
10309fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
10319fa3e853Sbellard         if (n == 0) {
10329fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
10339fa3e853Sbellard                it is not a problem */
10349fa3e853Sbellard             tb_start = tb->pc & ~TARGET_PAGE_MASK;
10359fa3e853Sbellard             tb_end = tb_start + tb->size;
10369fa3e853Sbellard             if (tb_end > TARGET_PAGE_SIZE)
10379fa3e853Sbellard                 tb_end = TARGET_PAGE_SIZE;
10389fa3e853Sbellard         } else {
10399fa3e853Sbellard             tb_start = 0;
10409fa3e853Sbellard             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
10419fa3e853Sbellard         }
10429fa3e853Sbellard         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
10439fa3e853Sbellard         tb = tb->page_next[n];
10449fa3e853Sbellard     }
10459fa3e853Sbellard }
10469fa3e853Sbellard 
10479349b4f9SAndreas Färber TranslationBlock *tb_gen_code(CPUArchState *env,
10482e70f6efSpbrook                               target_ulong pc, target_ulong cs_base,
10492e70f6efSpbrook                               int flags, int cflags)
1050d720b93dSbellard {
1051d720b93dSbellard     TranslationBlock *tb;
1052d720b93dSbellard     uint8_t *tc_ptr;
105341c1b1c9SPaul Brook     tb_page_addr_t phys_pc, phys_page2;
105441c1b1c9SPaul Brook     target_ulong virt_page2;
1055d720b93dSbellard     int code_gen_size;
1056d720b93dSbellard 
105741c1b1c9SPaul Brook     phys_pc = get_page_addr_code(env, pc);
1058c27004ecSbellard     tb = tb_alloc(pc);
1059d720b93dSbellard     if (!tb) {
1060d720b93dSbellard         /* flush must be done */
1061d720b93dSbellard         tb_flush(env);
1062d720b93dSbellard         /* cannot fail at this point */
1063c27004ecSbellard         tb = tb_alloc(pc);
10642e70f6efSpbrook         /* Don't forget to invalidate previous TB info.  */
10652e70f6efSpbrook         tb_invalidated_flag = 1;
1066d720b93dSbellard     }
1067d720b93dSbellard     tc_ptr = code_gen_ptr;
1068d720b93dSbellard     tb->tc_ptr = tc_ptr;
1069d720b93dSbellard     tb->cs_base = cs_base;
1070d720b93dSbellard     tb->flags = flags;
1071d720b93dSbellard     tb->cflags = cflags;
1072d07bde88Sblueswir1     cpu_gen_code(env, tb, &code_gen_size);
10738efe0ca8SStefan Weil     code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
10748efe0ca8SStefan Weil                              CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1075d720b93dSbellard 
1076d720b93dSbellard     /* check next page if needed */
1077c27004ecSbellard     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1078d720b93dSbellard     phys_page2 = -1;
1079c27004ecSbellard     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
108041c1b1c9SPaul Brook         phys_page2 = get_page_addr_code(env, virt_page2);
1081d720b93dSbellard     }
108241c1b1c9SPaul Brook     tb_link_page(tb, phys_pc, phys_page2);
10832e70f6efSpbrook     return tb;
1084d720b93dSbellard }
1085d720b93dSbellard 
108677a8f1a5SAlexander Graf /*
10878e0fdce3SJan Kiszka  * Invalidate all TBs which intersect with the target physical address range
10888e0fdce3SJan Kiszka  * [start;end[. NOTE: start and end may refer to *different* physical pages.
10898e0fdce3SJan Kiszka  * 'is_cpu_write_access' should be true if called from a real cpu write
10908e0fdce3SJan Kiszka  * access: the virtual CPU will exit the current TB if code is modified inside
10918e0fdce3SJan Kiszka  * this TB.
109277a8f1a5SAlexander Graf  */
109377a8f1a5SAlexander Graf void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
109477a8f1a5SAlexander Graf                               int is_cpu_write_access)
109577a8f1a5SAlexander Graf {
109677a8f1a5SAlexander Graf     while (start < end) {
109777a8f1a5SAlexander Graf         tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
109877a8f1a5SAlexander Graf         start &= TARGET_PAGE_MASK;
109977a8f1a5SAlexander Graf         start += TARGET_PAGE_SIZE;
110077a8f1a5SAlexander Graf     }
110177a8f1a5SAlexander Graf }
110277a8f1a5SAlexander Graf 
11038e0fdce3SJan Kiszka /*
11048e0fdce3SJan Kiszka  * Invalidate all TBs which intersect with the target physical address range
11058e0fdce3SJan Kiszka  * [start;end[. NOTE: start and end must refer to the *same* physical page.
11068e0fdce3SJan Kiszka  * 'is_cpu_write_access' should be true if called from a real cpu write
11078e0fdce3SJan Kiszka  * access: the virtual CPU will exit the current TB if code is modified inside
11088e0fdce3SJan Kiszka  * this TB.
11098e0fdce3SJan Kiszka  */
111041c1b1c9SPaul Brook void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1111d720b93dSbellard                                    int is_cpu_write_access)
11129fa3e853Sbellard {
11136b917547Saliguori     TranslationBlock *tb, *tb_next, *saved_tb;
11149349b4f9SAndreas Färber     CPUArchState *env = cpu_single_env;
111541c1b1c9SPaul Brook     tb_page_addr_t tb_start, tb_end;
11166b917547Saliguori     PageDesc *p;
11176b917547Saliguori     int n;
11186b917547Saliguori #ifdef TARGET_HAS_PRECISE_SMC
11196b917547Saliguori     int current_tb_not_found = is_cpu_write_access;
11206b917547Saliguori     TranslationBlock *current_tb = NULL;
11216b917547Saliguori     int current_tb_modified = 0;
11226b917547Saliguori     target_ulong current_pc = 0;
11236b917547Saliguori     target_ulong current_cs_base = 0;
11246b917547Saliguori     int current_flags = 0;
11256b917547Saliguori #endif /* TARGET_HAS_PRECISE_SMC */
11269fa3e853Sbellard 
11279fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
11289fa3e853Sbellard     if (!p)
11299fa3e853Sbellard         return;
11309fa3e853Sbellard     if (!p->code_bitmap &&
1131d720b93dSbellard         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1132d720b93dSbellard         is_cpu_write_access) {
11339fa3e853Sbellard         /* build code bitmap */
11349fa3e853Sbellard         build_page_bitmap(p);
11359fa3e853Sbellard     }
11369fa3e853Sbellard 
11379fa3e853Sbellard     /* we remove all the TBs in the range [start, end[ */
11389fa3e853Sbellard     /* XXX: see if in some cases it could be faster to invalidate all the code */
11399fa3e853Sbellard     tb = p->first_tb;
11409fa3e853Sbellard     while (tb != NULL) {
11418efe0ca8SStefan Weil         n = (uintptr_t)tb & 3;
11428efe0ca8SStefan Weil         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
11439fa3e853Sbellard         tb_next = tb->page_next[n];
11449fa3e853Sbellard         /* NOTE: this is subtle as a TB may span two physical pages */
11459fa3e853Sbellard         if (n == 0) {
11469fa3e853Sbellard             /* NOTE: tb_end may be after the end of the page, but
11479fa3e853Sbellard                it is not a problem */
11489fa3e853Sbellard             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
11499fa3e853Sbellard             tb_end = tb_start + tb->size;
11509fa3e853Sbellard         } else {
11519fa3e853Sbellard             tb_start = tb->page_addr[1];
11529fa3e853Sbellard             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
11539fa3e853Sbellard         }
11549fa3e853Sbellard         if (!(tb_end <= start || tb_start >= end)) {
1155d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1156d720b93dSbellard             if (current_tb_not_found) {
1157d720b93dSbellard                 current_tb_not_found = 0;
1158d720b93dSbellard                 current_tb = NULL;
11592e70f6efSpbrook                 if (env->mem_io_pc) {
1160d720b93dSbellard                     /* now we have a real cpu fault */
11612e70f6efSpbrook                     current_tb = tb_find_pc(env->mem_io_pc);
1162d720b93dSbellard                 }
1163d720b93dSbellard             }
1164d720b93dSbellard             if (current_tb == tb &&
11652e70f6efSpbrook                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1166d720b93dSbellard                 /* If we are modifying the current TB, we must stop
1167d720b93dSbellard                 its execution. We could be more precise by checking
1168d720b93dSbellard                 that the modification is after the current PC, but it
1169d720b93dSbellard                 would require a specialized function to partially
1170d720b93dSbellard                 restore the CPU state */
1171d720b93dSbellard 
1172d720b93dSbellard                 current_tb_modified = 1;
1173618ba8e6SStefan Weil                 cpu_restore_state(current_tb, env, env->mem_io_pc);
11746b917547Saliguori                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
11756b917547Saliguori                                      &current_flags);
1176d720b93dSbellard             }
1177d720b93dSbellard #endif /* TARGET_HAS_PRECISE_SMC */
11786f5a9f7eSbellard             /* we need to do that to handle the case where a signal
11796f5a9f7eSbellard                occurs while doing tb_phys_invalidate() */
11806f5a9f7eSbellard             saved_tb = NULL;
11816f5a9f7eSbellard             if (env) {
1182ea1c1802Sbellard                 saved_tb = env->current_tb;
1183ea1c1802Sbellard                 env->current_tb = NULL;
11846f5a9f7eSbellard             }
11859fa3e853Sbellard             tb_phys_invalidate(tb, -1);
11866f5a9f7eSbellard             if (env) {
1187ea1c1802Sbellard                 env->current_tb = saved_tb;
1188ea1c1802Sbellard                 if (env->interrupt_request && env->current_tb)
1189ea1c1802Sbellard                     cpu_interrupt(env, env->interrupt_request);
11909fa3e853Sbellard             }
11916f5a9f7eSbellard         }
11929fa3e853Sbellard         tb = tb_next;
11939fa3e853Sbellard     }
11949fa3e853Sbellard #if !defined(CONFIG_USER_ONLY)
11959fa3e853Sbellard     /* if no code remaining, no need to continue to use slow writes */
11969fa3e853Sbellard     if (!p->first_tb) {
11979fa3e853Sbellard         invalidate_page_bitmap(p);
1198d720b93dSbellard         if (is_cpu_write_access) {
11992e70f6efSpbrook             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1200d720b93dSbellard         }
1201d720b93dSbellard     }
1202d720b93dSbellard #endif
1203d720b93dSbellard #ifdef TARGET_HAS_PRECISE_SMC
1204d720b93dSbellard     if (current_tb_modified) {
1205d720b93dSbellard         /* we generate a block containing just the instruction
1206d720b93dSbellard            modifying the memory. It will ensure that it cannot modify
1207d720b93dSbellard            itself */
1208ea1c1802Sbellard         env->current_tb = NULL;
12092e70f6efSpbrook         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1210d720b93dSbellard         cpu_resume_from_signal(env, NULL);
12119fa3e853Sbellard     }
12129fa3e853Sbellard #endif
12139fa3e853Sbellard }
12149fa3e853Sbellard 
12159fa3e853Sbellard /* len must be <= 8 and start must be a multiple of len */
121641c1b1c9SPaul Brook static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
12179fa3e853Sbellard {
12189fa3e853Sbellard     PageDesc *p;
12199fa3e853Sbellard     int offset, b;
122059817ccbSbellard #if 0
1221a4193c8aSbellard     if (1) {
122293fcfe39Saliguori         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
12232e70f6efSpbrook                   cpu_single_env->mem_io_vaddr, len,
1224a4193c8aSbellard                   cpu_single_env->eip,
12258efe0ca8SStefan Weil                   cpu_single_env->eip +
12268efe0ca8SStefan Weil                   (intptr_t)cpu_single_env->segs[R_CS].base);
1227a4193c8aSbellard     }
122859817ccbSbellard #endif
12299fa3e853Sbellard     p = page_find(start >> TARGET_PAGE_BITS);
12309fa3e853Sbellard     if (!p)
12319fa3e853Sbellard         return;
12329fa3e853Sbellard     if (p->code_bitmap) {
12339fa3e853Sbellard         offset = start & ~TARGET_PAGE_MASK;
12349fa3e853Sbellard         b = p->code_bitmap[offset >> 3] >> (offset & 7);
12359fa3e853Sbellard         if (b & ((1 << len) - 1))
12369fa3e853Sbellard             goto do_invalidate;
12379fa3e853Sbellard     } else {
12389fa3e853Sbellard     do_invalidate:
1239d720b93dSbellard         tb_invalidate_phys_page_range(start, start + len, 1);
12409fa3e853Sbellard     }
12419fa3e853Sbellard }
12429fa3e853Sbellard 
12439fa3e853Sbellard #if !defined(CONFIG_SOFTMMU)
/* Invalidate all TBs on the physical page containing 'addr' after a
   write detected from a signal handler (user-mode emulation only).
   'pc' is the faulting host pc (0 if unknown) and 'puc' the signal
   context, used to restart execution if the current TB was hit. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB containing the faulting host pc, if any */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* the low 2 bits of each list pointer encode which of the TB's
           two pages (0 or 1) the entry belongs to */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            /* rewind the CPU state to the faulting instruction */
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
13029fa3e853Sbellard #endif
1303fd6ce8f6Sbellard 
/* add the tb in the target page and protect it if necessary */
/* 'n' selects which of the TB's (up to two) pages is being linked;
   'page_addr' is the guest-physical page address. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push the TB at the head of the page's list; the low bits of the
       pointer record which page slot (0 or 1) this entry is for */
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: accumulate their
           flags and strip PAGE_WRITE from each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1361fd6ce8f6Sbellard 
13629fa3e853Sbellard /* add a new TB and link it to the physical page tables. phys_page2 is
13639fa3e853Sbellard    (-1) to indicate that only one page contains the TB. */
136441c1b1c9SPaul Brook void tb_link_page(TranslationBlock *tb,
136541c1b1c9SPaul Brook                   tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1366d4e8164fSbellard {
13679fa3e853Sbellard     unsigned int h;
13689fa3e853Sbellard     TranslationBlock **ptb;
13699fa3e853Sbellard 
1370c8a706feSpbrook     /* Grab the mmap lock to stop another thread invalidating this TB
1371c8a706feSpbrook        before we are done.  */
1372c8a706feSpbrook     mmap_lock();
13739fa3e853Sbellard     /* add in the physical hash table */
13749fa3e853Sbellard     h = tb_phys_hash_func(phys_pc);
13759fa3e853Sbellard     ptb = &tb_phys_hash[h];
13769fa3e853Sbellard     tb->phys_hash_next = *ptb;
13779fa3e853Sbellard     *ptb = tb;
1378fd6ce8f6Sbellard 
1379fd6ce8f6Sbellard     /* add in the page list */
13809fa3e853Sbellard     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
13819fa3e853Sbellard     if (phys_page2 != -1)
13829fa3e853Sbellard         tb_alloc_page(tb, 1, phys_page2);
13839fa3e853Sbellard     else
13849fa3e853Sbellard         tb->page_addr[1] = -1;
13859fa3e853Sbellard 
13868efe0ca8SStefan Weil     tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1387d4e8164fSbellard     tb->jmp_next[0] = NULL;
1388d4e8164fSbellard     tb->jmp_next[1] = NULL;
1389d4e8164fSbellard 
1390d4e8164fSbellard     /* init original jump addresses */
1391d4e8164fSbellard     if (tb->tb_next_offset[0] != 0xffff)
1392d4e8164fSbellard         tb_reset_jump(tb, 0);
1393d4e8164fSbellard     if (tb->tb_next_offset[1] != 0xffff)
1394d4e8164fSbellard         tb_reset_jump(tb, 1);
13958a40a180Sbellard 
13968a40a180Sbellard #ifdef DEBUG_TB_CHECK
13978a40a180Sbellard     tb_page_check();
13988a40a180Sbellard #endif
1399c8a706feSpbrook     mmap_unlock();
1400fd6ce8f6Sbellard }
1401fd6ce8f6Sbellard 
1402a513fe19Sbellard /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1403a513fe19Sbellard    tb[1].tc_ptr. Return NULL if not found */
14046375e09eSStefan Weil TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1405a513fe19Sbellard {
1406a513fe19Sbellard     int m_min, m_max, m;
14078efe0ca8SStefan Weil     uintptr_t v;
1408a513fe19Sbellard     TranslationBlock *tb;
1409a513fe19Sbellard 
1410a513fe19Sbellard     if (nb_tbs <= 0)
1411a513fe19Sbellard         return NULL;
14128efe0ca8SStefan Weil     if (tc_ptr < (uintptr_t)code_gen_buffer ||
14138efe0ca8SStefan Weil         tc_ptr >= (uintptr_t)code_gen_ptr) {
1414a513fe19Sbellard         return NULL;
14158efe0ca8SStefan Weil     }
1416a513fe19Sbellard     /* binary search (cf Knuth) */
1417a513fe19Sbellard     m_min = 0;
1418a513fe19Sbellard     m_max = nb_tbs - 1;
1419a513fe19Sbellard     while (m_min <= m_max) {
1420a513fe19Sbellard         m = (m_min + m_max) >> 1;
1421a513fe19Sbellard         tb = &tbs[m];
14228efe0ca8SStefan Weil         v = (uintptr_t)tb->tc_ptr;
1423a513fe19Sbellard         if (v == tc_ptr)
1424a513fe19Sbellard             return tb;
1425a513fe19Sbellard         else if (tc_ptr < v) {
1426a513fe19Sbellard             m_max = m - 1;
1427a513fe19Sbellard         } else {
1428a513fe19Sbellard             m_min = m + 1;
1429a513fe19Sbellard         }
1430a513fe19Sbellard     }
1431a513fe19Sbellard     return &tbs[m_max];
1432a513fe19Sbellard }
14337501267eSbellard 
1434ea041c0eSbellard static void tb_reset_jump_recursive(TranslationBlock *tb);
1435ea041c0eSbellard 
/* Unchain the n-th outgoing jump of 'tb': remove tb from the circular
   list of TBs jumping into its target, patch the generated code back
   to exit the TB, then recurse into the target so no chained TB can
   keep executing. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            /* the low 2 bits tag each entry; tag 2 marks the list head */
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1474ea041c0eSbellard 
/* Unchain both outgoing jump slots of 'tb'
   (see tb_reset_jump_recursive2). */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
1480ea041c0eSbellard 
14811fddef4bSbellard #if defined(TARGET_HAS_ICE)
148294df27fdSPaul Brook #if defined(CONFIG_USER_ONLY)
/* User-mode: guest addresses are used directly as "physical" TB
   addresses, so invalidate the byte at pc without translation. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
148794df27fdSPaul Brook #else
/* Invalidate any translated code covering the given guest physical
   address, provided it is backed by RAM or a readable ROM device. */
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        /* not RAM/ROM: no translated code can originate there */
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
15021e7855a5SMax Filippov 
/* System emulation: translate the breakpoint's virtual pc to a
   physical address via the debug page walker, then invalidate. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
1508c27004ecSbellard #endif
150994df27fdSPaul Brook #endif /* TARGET_HAS_ICE */
1510d720b93dSbellard 
1511c527ee8fSPaul Brook #if defined(CONFIG_USER_ONLY)
15129349b4f9SAndreas Färber void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1513c527ee8fSPaul Brook 
1514c527ee8fSPaul Brook {
1515c527ee8fSPaul Brook }
1516c527ee8fSPaul Brook 
/* User-mode emulation has no watchpoint support. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1522c527ee8fSPaul Brook #else
15236658ffb8Spbrook /* Add a watchpoint.  */
15249349b4f9SAndreas Färber int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
1525a1d1bb31Saliguori                           int flags, CPUWatchpoint **watchpoint)
15266658ffb8Spbrook {
1527b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1528c0ce998eSaliguori     CPUWatchpoint *wp;
15296658ffb8Spbrook 
1530b4051334Saliguori     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
15310dc23828SMax Filippov     if ((len & (len - 1)) || (addr & ~len_mask) ||
15320dc23828SMax Filippov             len == 0 || len > TARGET_PAGE_SIZE) {
1533b4051334Saliguori         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1534b4051334Saliguori                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1535b4051334Saliguori         return -EINVAL;
1536b4051334Saliguori     }
15377267c094SAnthony Liguori     wp = g_malloc(sizeof(*wp));
15386658ffb8Spbrook 
1539a1d1bb31Saliguori     wp->vaddr = addr;
1540b4051334Saliguori     wp->len_mask = len_mask;
1541a1d1bb31Saliguori     wp->flags = flags;
1542a1d1bb31Saliguori 
15432dc9f411Saliguori     /* keep all GDB-injected watchpoints in front */
1544c0ce998eSaliguori     if (flags & BP_GDB)
154572cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1546c0ce998eSaliguori     else
154772cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1548a1d1bb31Saliguori 
15496658ffb8Spbrook     tlb_flush_page(env, addr);
1550a1d1bb31Saliguori 
1551a1d1bb31Saliguori     if (watchpoint)
1552a1d1bb31Saliguori         *watchpoint = wp;
1553a1d1bb31Saliguori     return 0;
15546658ffb8Spbrook }
15556658ffb8Spbrook 
1556a1d1bb31Saliguori /* Remove a specific watchpoint.  */
15579349b4f9SAndreas Färber int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
1558a1d1bb31Saliguori                           int flags)
15596658ffb8Spbrook {
1560b4051334Saliguori     target_ulong len_mask = ~(len - 1);
1561a1d1bb31Saliguori     CPUWatchpoint *wp;
15626658ffb8Spbrook 
156372cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1564b4051334Saliguori         if (addr == wp->vaddr && len_mask == wp->len_mask
15656e140f28Saliguori                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1566a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
15676658ffb8Spbrook             return 0;
15686658ffb8Spbrook         }
15696658ffb8Spbrook     }
1570a1d1bb31Saliguori     return -ENOENT;
15716658ffb8Spbrook }
15726658ffb8Spbrook 
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* drop the TLB entry that was forcing slow accesses to this page */
    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}
15827d03f82fSedgar_igl 
1583a1d1bb31Saliguori /* Remove all matching watchpoints.  */
15849349b4f9SAndreas Färber void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1585a1d1bb31Saliguori {
1586c0ce998eSaliguori     CPUWatchpoint *wp, *next;
1587a1d1bb31Saliguori 
158872cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1589a1d1bb31Saliguori         if (wp->flags & mask)
1590a1d1bb31Saliguori             cpu_watchpoint_remove_by_ref(env, wp);
1591a1d1bb31Saliguori     }
1592c0ce998eSaliguori }
1593c527ee8fSPaul Brook #endif
1594a1d1bb31Saliguori 
1595a1d1bb31Saliguori /* Add a breakpoint.  */
15969349b4f9SAndreas Färber int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
1597a1d1bb31Saliguori                           CPUBreakpoint **breakpoint)
15984c3a88a2Sbellard {
15991fddef4bSbellard #if defined(TARGET_HAS_ICE)
1600c0ce998eSaliguori     CPUBreakpoint *bp;
16014c3a88a2Sbellard 
16027267c094SAnthony Liguori     bp = g_malloc(sizeof(*bp));
16034c3a88a2Sbellard 
1604a1d1bb31Saliguori     bp->pc = pc;
1605a1d1bb31Saliguori     bp->flags = flags;
1606a1d1bb31Saliguori 
16072dc9f411Saliguori     /* keep all GDB-injected breakpoints in front */
1608c0ce998eSaliguori     if (flags & BP_GDB)
160972cf2d4fSBlue Swirl         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1610c0ce998eSaliguori     else
161172cf2d4fSBlue Swirl         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1612d720b93dSbellard 
1613d720b93dSbellard     breakpoint_invalidate(env, pc);
1614a1d1bb31Saliguori 
1615a1d1bb31Saliguori     if (breakpoint)
1616a1d1bb31Saliguori         *breakpoint = bp;
16174c3a88a2Sbellard     return 0;
16184c3a88a2Sbellard #else
1619a1d1bb31Saliguori     return -ENOSYS;
16204c3a88a2Sbellard #endif
16214c3a88a2Sbellard }
16224c3a88a2Sbellard 
1623a1d1bb31Saliguori /* Remove a specific breakpoint.  */
16249349b4f9SAndreas Färber int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
1625a1d1bb31Saliguori {
16267d03f82fSedgar_igl #if defined(TARGET_HAS_ICE)
1627a1d1bb31Saliguori     CPUBreakpoint *bp;
1628a1d1bb31Saliguori 
162972cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1630a1d1bb31Saliguori         if (bp->pc == pc && bp->flags == flags) {
1631a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1632a1d1bb31Saliguori             return 0;
16337d03f82fSedgar_igl         }
1634a1d1bb31Saliguori     }
1635a1d1bb31Saliguori     return -ENOENT;
1636a1d1bb31Saliguori #else
1637a1d1bb31Saliguori     return -ENOSYS;
16387d03f82fSedgar_igl #endif
16397d03f82fSedgar_igl }
16407d03f82fSedgar_igl 
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* regenerate the TB that was carrying the breakpoint check */
    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}
1652a1d1bb31Saliguori 
1653a1d1bb31Saliguori /* Remove all matching breakpoints. */
16549349b4f9SAndreas Färber void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
1655a1d1bb31Saliguori {
1656a1d1bb31Saliguori #if defined(TARGET_HAS_ICE)
1657c0ce998eSaliguori     CPUBreakpoint *bp, *next;
1658a1d1bb31Saliguori 
165972cf2d4fSBlue Swirl     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1660a1d1bb31Saliguori         if (bp->flags & mask)
1661a1d1bb31Saliguori             cpu_breakpoint_remove_by_ref(env, bp);
1662c0ce998eSaliguori     }
16634c3a88a2Sbellard #endif
16644c3a88a2Sbellard }
16654c3a88a2Sbellard 
1666c33a346eSbellard /* enable or disable single step mode. EXCP_DEBUG is returned by the
1667c33a346eSbellard    CPU loop after each instruction */
16689349b4f9SAndreas Färber void cpu_single_step(CPUArchState *env, int enabled)
1669c33a346eSbellard {
16701fddef4bSbellard #if defined(TARGET_HAS_ICE)
1671c33a346eSbellard     if (env->singlestep_enabled != enabled) {
1672c33a346eSbellard         env->singlestep_enabled = enabled;
1673e22a25c9Saliguori         if (kvm_enabled())
1674e22a25c9Saliguori             kvm_update_guest_debug(env, 0);
1675e22a25c9Saliguori         else {
1676ccbb4d44SStuart Brady             /* must flush all the translated code to avoid inconsistencies */
16779fa3e853Sbellard             /* XXX: only flush what is necessary */
16780124311eSbellard             tb_flush(env);
1679c33a346eSbellard         }
1680e22a25c9Saliguori     }
1681c33a346eSbellard #endif
1682c33a346eSbellard }
1683c33a346eSbellard 
/* Kick the CPU out of the TB chain it is currently executing, so the
   main loop regains control and can notice pending requests. */
static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    /* serializes concurrent unlink attempts from signal/iothread */
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
17033098dba0Saurel32 
170497ffbd8dSJan Kiszka #ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
/* Default TCG interrupt delivery: record the request and make sure
   the target CPU notices it promptly. */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        /* force the instruction counter to expire immediately so the
           execution loop re-examines interrupt_request */
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        /* break the current TB chain so the loop sees the request */
        cpu_unlink_tb(env);
    }
}
1732ea041c0eSbellard 
1733ec6959d0SJan Kiszka CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1734ec6959d0SJan Kiszka 
173597ffbd8dSJan Kiszka #else /* CONFIG_USER_ONLY */
173697ffbd8dSJan Kiszka 
/* User-mode: no iothread or icount, just record the request and break
   out of the current TB chain. */
void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
174297ffbd8dSJan Kiszka #endif /* CONFIG_USER_ONLY */
174397ffbd8dSJan Kiszka 
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1748b54ad049Sbellard 
/* Ask the CPU to leave its execution loop as soon as possible. */
void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
17543098dba0Saurel32 
/* Report a fatal emulation error to stderr (and the log file when
   enabled), dump the CPU state and abort().  Never returns. */
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);   /* the argument list may be consumed twice */
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() terminates
           the process instead of entering a guest signal handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
17867501267eSbellard 
17879349b4f9SAndreas Färber CPUArchState *cpu_copy(CPUArchState *env)
1788c5be9f08Sths {
17899349b4f9SAndreas Färber     CPUArchState *new_env = cpu_init(env->cpu_model_str);
17909349b4f9SAndreas Färber     CPUArchState *next_cpu = new_env->next_cpu;
1791c5be9f08Sths     int cpu_index = new_env->cpu_index;
17925a38f081Saliguori #if defined(TARGET_HAS_ICE)
17935a38f081Saliguori     CPUBreakpoint *bp;
17945a38f081Saliguori     CPUWatchpoint *wp;
17955a38f081Saliguori #endif
17965a38f081Saliguori 
17979349b4f9SAndreas Färber     memcpy(new_env, env, sizeof(CPUArchState));
17985a38f081Saliguori 
17995a38f081Saliguori     /* Preserve chaining and index. */
1800c5be9f08Sths     new_env->next_cpu = next_cpu;
1801c5be9f08Sths     new_env->cpu_index = cpu_index;
18025a38f081Saliguori 
18035a38f081Saliguori     /* Clone all break/watchpoints.
18045a38f081Saliguori        Note: Once we support ptrace with hw-debug register access, make sure
18055a38f081Saliguori        BP_CPU break/watchpoints are handled correctly on clone. */
180672cf2d4fSBlue Swirl     QTAILQ_INIT(&env->breakpoints);
180772cf2d4fSBlue Swirl     QTAILQ_INIT(&env->watchpoints);
18085a38f081Saliguori #if defined(TARGET_HAS_ICE)
180972cf2d4fSBlue Swirl     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
18105a38f081Saliguori         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
18115a38f081Saliguori     }
181272cf2d4fSBlue Swirl     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
18135a38f081Saliguori         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
18145a38f081Saliguori                               wp->flags, NULL);
18155a38f081Saliguori     }
18165a38f081Saliguori #endif
18175a38f081Saliguori 
1818c5be9f08Sths     return new_env;
1819c5be9f08Sths }
1820c5be9f08Sths 
18210124311eSbellard #if !defined(CONFIG_USER_ONLY)
18220cac1b66SBlue Swirl void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
18235c751e99Sedgar_igl {
18245c751e99Sedgar_igl     unsigned int i;
18255c751e99Sedgar_igl 
18265c751e99Sedgar_igl     /* Discard jump cache entries for any tb which might potentially
18275c751e99Sedgar_igl        overlap the flushed page.  */
18285c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
18295c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
18305c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
18315c751e99Sedgar_igl 
18325c751e99Sedgar_igl     i = tb_jmp_cache_hash_page(addr);
18335c751e99Sedgar_igl     memset (&env->tb_jmp_cache[i], 0,
18345c751e99Sedgar_igl             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
18355c751e99Sedgar_igl }
18365c751e99Sedgar_igl 
/* Re-arm dirty tracking in all CPU TLBs for the host memory backing
   the RAM range [start, end); 'length' is end - start. */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}
1854d24981d3SJuan Quintela 
1855d24981d3SJuan Quintela /* Note: start and end must be within the same ram block.  */
1856d24981d3SJuan Quintela void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1857d24981d3SJuan Quintela                                      int dirty_flags)
1858d24981d3SJuan Quintela {
1859d24981d3SJuan Quintela     uintptr_t length;
1860d24981d3SJuan Quintela 
1861d24981d3SJuan Quintela     start &= TARGET_PAGE_MASK;
1862d24981d3SJuan Quintela     end = TARGET_PAGE_ALIGN(end);
1863d24981d3SJuan Quintela 
1864d24981d3SJuan Quintela     length = end - start;
1865d24981d3SJuan Quintela     if (length == 0)
1866d24981d3SJuan Quintela         return;
1867d24981d3SJuan Quintela     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1868d24981d3SJuan Quintela 
1869d24981d3SJuan Quintela     if (tcg_enabled()) {
1870d24981d3SJuan Quintela         tlb_reset_dirty_range_all(start, end, length);
1871d24981d3SJuan Quintela     }
18721ccde1cbSbellard }
18731ccde1cbSbellard 
187474576198Saliguori int cpu_physical_memory_set_dirty_tracking(int enable)
187574576198Saliguori {
1876f6f3fbcaSMichael S. Tsirkin     int ret = 0;
187774576198Saliguori     in_migration = enable;
1878f6f3fbcaSMichael S. Tsirkin     return ret;
187974576198Saliguori }
188074576198Saliguori 
/* Compute the iotlb value for a TLB entry mapping VADDR->PADDR inside
 * SECTION.  For RAM, the value is a ram address, possibly tagged with the
 * notdirty or rom section index so writes are intercepted; for MMIO, it is
 * the section's index in phys_sections plus the offset within it.
 * *ADDRESS gets TLB_MMIO ORed in when a watchpoint forces the slow path. */
target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            /* Writable RAM: trap writes so dirty tracking stays correct. */
            iotlb |= phys_section_notdirty;
        } else {
            /* Read-only RAM: trap writes entirely. */
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
1926e5548617SBlue Swirl 
19270124311eSbellard #else
1928edf8e2afSMika Westerberg /*
1929edf8e2afSMika Westerberg  * Walks guest process memory "regions" one by one
1930edf8e2afSMika Westerberg  * and calls callback function 'fn' for each region.
1931edf8e2afSMika Westerberg  */
19325cd2c5b6SRichard Henderson 
/* State threaded through walk_memory_regions_1(): the user callback plus
 * the start address and protection of the contiguous region currently
 * being accumulated. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked once per region */
    void *priv;                 /* opaque argument passed through to fn */
    uintptr_t start;            /* start of open region, or -1ul if none */
    int prot;                   /* protection flags of the open region */
};
19409fa3e853Sbellard 
19415cd2c5b6SRichard Henderson static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1942b480d9b7SPaul Brook                                    abi_ulong end, int new_prot)
19435cd2c5b6SRichard Henderson {
19445cd2c5b6SRichard Henderson     if (data->start != -1ul) {
19455cd2c5b6SRichard Henderson         int rc = data->fn(data->priv, data->start, end, data->prot);
19465cd2c5b6SRichard Henderson         if (rc != 0) {
19475cd2c5b6SRichard Henderson             return rc;
19485cd2c5b6SRichard Henderson         }
19495cd2c5b6SRichard Henderson     }
1950edf8e2afSMika Westerberg 
19515cd2c5b6SRichard Henderson     data->start = (new_prot ? end : -1ul);
19525cd2c5b6SRichard Henderson     data->prot = new_prot;
19535cd2c5b6SRichard Henderson 
19545cd2c5b6SRichard Henderson     return 0;
195533417e70Sbellard }
19565cd2c5b6SRichard Henderson 
/* Recursively walk one node of the guest page table at the given LEVEL,
 * reporting protection changes through walk_memory_regions_end().
 * BASE is the guest address of the first page covered by this node.
 * Returns the first non-zero callback result, or 0 on completion. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: close any open region at this address. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        /* Leaf level: scan the individual page descriptors. */
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            /* Protection changed: emit the previous region, open a new one. */
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        /* Interior level: recurse into each child node. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
19945cd2c5b6SRichard Henderson 
19955cd2c5b6SRichard Henderson int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
19965cd2c5b6SRichard Henderson {
19975cd2c5b6SRichard Henderson     struct walk_memory_regions_data data;
19988efe0ca8SStefan Weil     uintptr_t i;
19995cd2c5b6SRichard Henderson 
20005cd2c5b6SRichard Henderson     data.fn = fn;
20015cd2c5b6SRichard Henderson     data.priv = priv;
20025cd2c5b6SRichard Henderson     data.start = -1ul;
20035cd2c5b6SRichard Henderson     data.prot = 0;
20045cd2c5b6SRichard Henderson 
20055cd2c5b6SRichard Henderson     for (i = 0; i < V_L1_SIZE; i++) {
2006b480d9b7SPaul Brook         int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
20075cd2c5b6SRichard Henderson                                        V_L1_SHIFT / L2_BITS - 1, l1_map + i);
20085cd2c5b6SRichard Henderson         if (rc != 0) {
20095cd2c5b6SRichard Henderson             return rc;
20105cd2c5b6SRichard Henderson         }
20115cd2c5b6SRichard Henderson     }
20125cd2c5b6SRichard Henderson 
20135cd2c5b6SRichard Henderson     return walk_memory_regions_end(&data, 0, 0);
2014edf8e2afSMika Westerberg }
2015edf8e2afSMika Westerberg 
2016b480d9b7SPaul Brook static int dump_region(void *priv, abi_ulong start,
2017b480d9b7SPaul Brook     abi_ulong end, unsigned long prot)
2018edf8e2afSMika Westerberg {
2019edf8e2afSMika Westerberg     FILE *f = (FILE *)priv;
2020edf8e2afSMika Westerberg 
2021b480d9b7SPaul Brook     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2022b480d9b7SPaul Brook         " "TARGET_ABI_FMT_lx" %c%c%c\n",
2023edf8e2afSMika Westerberg         start, end, end - start,
2024edf8e2afSMika Westerberg         ((prot & PAGE_READ) ? 'r' : '-'),
2025edf8e2afSMika Westerberg         ((prot & PAGE_WRITE) ? 'w' : '-'),
2026edf8e2afSMika Westerberg         ((prot & PAGE_EXEC) ? 'x' : '-'));
2027edf8e2afSMika Westerberg 
2028edf8e2afSMika Westerberg     return (0);
2029edf8e2afSMika Westerberg }
2030edf8e2afSMika Westerberg 
2031edf8e2afSMika Westerberg /* dump memory mappings */
2032edf8e2afSMika Westerberg void page_dump(FILE *f)
2033edf8e2afSMika Westerberg {
2034edf8e2afSMika Westerberg     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2035edf8e2afSMika Westerberg             "start", "end", "size", "prot");
2036edf8e2afSMika Westerberg     walk_memory_regions(f, dump_region);
20379fa3e853Sbellard }
20389fa3e853Sbellard 
203953a5960aSpbrook int page_get_flags(target_ulong address)
20409fa3e853Sbellard {
20419fa3e853Sbellard     PageDesc *p;
20429fa3e853Sbellard 
20439fa3e853Sbellard     p = page_find(address >> TARGET_PAGE_BITS);
20449fa3e853Sbellard     if (!p)
20459fa3e853Sbellard         return 0;
20469fa3e853Sbellard     return p->flags;
20479fa3e853Sbellard }
20489fa3e853Sbellard 
2049376a7909SRichard Henderson /* Modify the flags of a page and invalidate the code if necessary.
2050376a7909SRichard Henderson    The flag PAGE_WRITE_ORG is positioned automatically depending
2051376a7909SRichard Henderson    on PAGE_WRITE.  The mmap_lock should already be held.  */
205253a5960aSpbrook void page_set_flags(target_ulong start, target_ulong end, int flags)
20539fa3e853Sbellard {
2054376a7909SRichard Henderson     target_ulong addr, len;
20559fa3e853Sbellard 
2056376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2057376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2058376a7909SRichard Henderson        a missing call to h2g_valid.  */
2059b480d9b7SPaul Brook #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2060b480d9b7SPaul Brook     assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2061376a7909SRichard Henderson #endif
2062376a7909SRichard Henderson     assert(start < end);
2063376a7909SRichard Henderson 
20649fa3e853Sbellard     start = start & TARGET_PAGE_MASK;
20659fa3e853Sbellard     end = TARGET_PAGE_ALIGN(end);
2066376a7909SRichard Henderson 
2067376a7909SRichard Henderson     if (flags & PAGE_WRITE) {
20689fa3e853Sbellard         flags |= PAGE_WRITE_ORG;
2069376a7909SRichard Henderson     }
2070376a7909SRichard Henderson 
2071376a7909SRichard Henderson     for (addr = start, len = end - start;
2072376a7909SRichard Henderson          len != 0;
2073376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2074376a7909SRichard Henderson         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2075376a7909SRichard Henderson 
2076376a7909SRichard Henderson         /* If the write protection bit is set, then we invalidate
2077376a7909SRichard Henderson            the code inside.  */
20789fa3e853Sbellard         if (!(p->flags & PAGE_WRITE) &&
20799fa3e853Sbellard             (flags & PAGE_WRITE) &&
20809fa3e853Sbellard             p->first_tb) {
2081d720b93dSbellard             tb_invalidate_phys_page(addr, 0, NULL);
20829fa3e853Sbellard         }
20839fa3e853Sbellard         p->flags = flags;
20849fa3e853Sbellard     }
20859fa3e853Sbellard }
20869fa3e853Sbellard 
20873d97b40bSths int page_check_range(target_ulong start, target_ulong len, int flags)
20883d97b40bSths {
20893d97b40bSths     PageDesc *p;
20903d97b40bSths     target_ulong end;
20913d97b40bSths     target_ulong addr;
20923d97b40bSths 
2093376a7909SRichard Henderson     /* This function should never be called with addresses outside the
2094376a7909SRichard Henderson        guest address space.  If this assert fires, it probably indicates
2095376a7909SRichard Henderson        a missing call to h2g_valid.  */
2096338e9e6cSBlue Swirl #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2097338e9e6cSBlue Swirl     assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2098376a7909SRichard Henderson #endif
2099376a7909SRichard Henderson 
21003e0650a9SRichard Henderson     if (len == 0) {
21013e0650a9SRichard Henderson         return 0;
21023e0650a9SRichard Henderson     }
2103376a7909SRichard Henderson     if (start + len - 1 < start) {
2104376a7909SRichard Henderson         /* We've wrapped around.  */
210555f280c9Sbalrog         return -1;
2106376a7909SRichard Henderson     }
210755f280c9Sbalrog 
21083d97b40bSths     end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
21093d97b40bSths     start = start & TARGET_PAGE_MASK;
21103d97b40bSths 
2111376a7909SRichard Henderson     for (addr = start, len = end - start;
2112376a7909SRichard Henderson          len != 0;
2113376a7909SRichard Henderson          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
21143d97b40bSths         p = page_find(addr >> TARGET_PAGE_BITS);
21153d97b40bSths         if( !p )
21163d97b40bSths             return -1;
21173d97b40bSths         if( !(p->flags & PAGE_VALID) )
21183d97b40bSths             return -1;
21193d97b40bSths 
2120dae3270cSbellard         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
21213d97b40bSths             return -1;
2122dae3270cSbellard         if (flags & PAGE_WRITE) {
2123dae3270cSbellard             if (!(p->flags & PAGE_WRITE_ORG))
21243d97b40bSths                 return -1;
2125dae3270cSbellard             /* unprotect the page if it was put read-only because it
2126dae3270cSbellard                contains translated code */
2127dae3270cSbellard             if (!(p->flags & PAGE_WRITE)) {
2128dae3270cSbellard                 if (!page_unprotect(addr, 0, NULL))
2129dae3270cSbellard                     return -1;
2130dae3270cSbellard             }
2131dae3270cSbellard             return 0;
2132dae3270cSbellard         }
21333d97b40bSths     }
21343d97b40bSths     return 0;
21353d97b40bSths }
21363d97b40bSths 
21379fa3e853Sbellard /* called from signal handler: invalidate the code and unprotect the
2138ccbb4d44SStuart Brady    page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        /* One host page may span several target pages: restore PAGE_WRITE
           on each and accumulate the combined flags for mprotect below. */
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
21849fa3e853Sbellard #endif /* defined(CONFIG_USER_ONLY) */
218533417e70Sbellard 
2186e2eef170Spbrook #if !defined(CONFIG_USER_ONLY)
21878da3ff18Spbrook 
/* Byte offset of ADDR within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Dispatcher for a target page shared by several memory sections:
 * sub_section maps each byte offset within the page (see SUBPAGE_IDX)
 * to a phys_sections index. */
typedef struct subpage_t {
    MemoryRegion iomem;         /* region installed for the whole page */
    target_phys_addr_t base;    /* guest-physical base of the page */
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
2194c04b2b78SPaul Brook 
2195c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
21965312bd8bSAvi Kivity                              uint16_t section);
21970f0cb164SAvi Kivity static subpage_t *subpage_init(target_phys_addr_t base);
21985312bd8bSAvi Kivity static void destroy_page_desc(uint16_t section_index)
219954688b1eSAvi Kivity {
22005312bd8bSAvi Kivity     MemoryRegionSection *section = &phys_sections[section_index];
22015312bd8bSAvi Kivity     MemoryRegion *mr = section->mr;
220254688b1eSAvi Kivity 
220354688b1eSAvi Kivity     if (mr->subpage) {
220454688b1eSAvi Kivity         subpage_t *subpage = container_of(mr, subpage_t, iomem);
220554688b1eSAvi Kivity         memory_region_destroy(&subpage->iomem);
220654688b1eSAvi Kivity         g_free(subpage);
220754688b1eSAvi Kivity     }
220854688b1eSAvi Kivity }
220954688b1eSAvi Kivity 
22104346ae3eSAvi Kivity static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
221154688b1eSAvi Kivity {
221254688b1eSAvi Kivity     unsigned i;
2213d6f2ea22SAvi Kivity     PhysPageEntry *p;
221454688b1eSAvi Kivity 
2215c19e8800SAvi Kivity     if (lp->ptr == PHYS_MAP_NODE_NIL) {
221654688b1eSAvi Kivity         return;
221754688b1eSAvi Kivity     }
221854688b1eSAvi Kivity 
2219c19e8800SAvi Kivity     p = phys_map_nodes[lp->ptr];
222054688b1eSAvi Kivity     for (i = 0; i < L2_SIZE; ++i) {
222107f07b31SAvi Kivity         if (!p[i].is_leaf) {
222254688b1eSAvi Kivity             destroy_l2_mapping(&p[i], level - 1);
22234346ae3eSAvi Kivity         } else {
2224c19e8800SAvi Kivity             destroy_page_desc(p[i].ptr);
22254346ae3eSAvi Kivity         }
222654688b1eSAvi Kivity     }
222707f07b31SAvi Kivity     lp->is_leaf = 0;
2228c19e8800SAvi Kivity     lp->ptr = PHYS_MAP_NODE_NIL;
222954688b1eSAvi Kivity }
223054688b1eSAvi Kivity 
/* Tear down the entire physical page map and recycle all map nodes. */
static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
223654688b1eSAvi Kivity 
22375312bd8bSAvi Kivity static uint16_t phys_section_add(MemoryRegionSection *section)
22385312bd8bSAvi Kivity {
22395312bd8bSAvi Kivity     if (phys_sections_nb == phys_sections_nb_alloc) {
22405312bd8bSAvi Kivity         phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
22415312bd8bSAvi Kivity         phys_sections = g_renew(MemoryRegionSection, phys_sections,
22425312bd8bSAvi Kivity                                 phys_sections_nb_alloc);
22435312bd8bSAvi Kivity     }
22445312bd8bSAvi Kivity     phys_sections[phys_sections_nb] = *section;
22455312bd8bSAvi Kivity     return phys_sections_nb++;
22465312bd8bSAvi Kivity }
22475312bd8bSAvi Kivity 
/* Forget all registered sections; the backing array is kept for reuse. */
static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
22525312bd8bSAvi Kivity 
/* Register a MemoryRegionSection smaller than a target page: install (or
 * reuse) a subpage_t container for the page, then record the section in
 * the container's per-offset dispatch table. */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    /* Section describing the subpage container itself, covering the page. */
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    /* The page must be either unassigned or already a subpage container;
       anything else would mean an overlapping full-page mapping. */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    /* end is the inclusive last byte offset within the page. */
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
22790f0cb164SAvi Kivity 
22800f0cb164SAvi Kivity 
22810f0cb164SAvi Kivity static void register_multipage(MemoryRegionSection *section)
228233417e70Sbellard {
2283dd81124bSAvi Kivity     target_phys_addr_t start_addr = section->offset_within_address_space;
2284dd81124bSAvi Kivity     ram_addr_t size = section->size;
22852999097bSAvi Kivity     target_phys_addr_t addr;
22865312bd8bSAvi Kivity     uint16_t section_index = phys_section_add(section);
2287dd81124bSAvi Kivity 
22883b8e6a2dSEdgar E. Iglesias     assert(size);
2289f6f3fbcaSMichael S. Tsirkin 
22903b8e6a2dSEdgar E. Iglesias     addr = start_addr;
22912999097bSAvi Kivity     phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
22922999097bSAvi Kivity                   section_index);
229333417e70Sbellard }
229433417e70Sbellard 
/* Register SECTION in the physical memory map, splitting it into an
 * unaligned head, a page-aligned middle, and an unaligned tail.  Partial
 * pages go through the subpage machinery; whole pages are mapped directly.
 * NOTE(review): the READONLY argument is not referenced in this body. */
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    /* Head: a partial page at the start (unaligned offset, or a section
       smaller than one page). */
    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    /* Middle: whole pages.  If the offset within the region is not page
       aligned we must still go one page at a time via subpages. */
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(&now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(&now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    /* Tail: whatever partial page is left. */
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}
23280f0cb164SAvi Kivity 
23290f0cb164SAvi Kivity 
2330c227f099SAnthony Liguori void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2331f65ed4c1Saliguori {
2332f65ed4c1Saliguori     if (kvm_enabled())
2333f65ed4c1Saliguori         kvm_coalesce_mmio_region(addr, size);
2334f65ed4c1Saliguori }
2335f65ed4c1Saliguori 
2336c227f099SAnthony Liguori void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2337f65ed4c1Saliguori {
2338f65ed4c1Saliguori     if (kvm_enabled())
2339f65ed4c1Saliguori         kvm_uncoalesce_mmio_region(addr, size);
2340f65ed4c1Saliguori }
2341f65ed4c1Saliguori 
/* Flush any MMIO writes buffered by the hypervisor; a no-op unless
 * KVM is in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
234762a2744cSSheng Yang 
2348c902760fSMarcelo Tosatti #if defined(__linux__) && !defined(TARGET_S390X)
2349c902760fSMarcelo Tosatti 
2350c902760fSMarcelo Tosatti #include <sys/vfs.h>
2351c902760fSMarcelo Tosatti 
2352c902760fSMarcelo Tosatti #define HUGETLBFS_MAGIC       0x958458f6
2353c902760fSMarcelo Tosatti 
2354c902760fSMarcelo Tosatti static long gethugepagesize(const char *path)
2355c902760fSMarcelo Tosatti {
2356c902760fSMarcelo Tosatti     struct statfs fs;
2357c902760fSMarcelo Tosatti     int ret;
2358c902760fSMarcelo Tosatti 
2359c902760fSMarcelo Tosatti     do {
2360c902760fSMarcelo Tosatti         ret = statfs(path, &fs);
2361c902760fSMarcelo Tosatti     } while (ret != 0 && errno == EINTR);
2362c902760fSMarcelo Tosatti 
2363c902760fSMarcelo Tosatti     if (ret != 0) {
23646adc0549SMichael Tokarev         perror(path);
2365c902760fSMarcelo Tosatti         return 0;
2366c902760fSMarcelo Tosatti     }
2367c902760fSMarcelo Tosatti 
2368c902760fSMarcelo Tosatti     if (fs.f_type != HUGETLBFS_MAGIC)
2369c902760fSMarcelo Tosatti         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2370c902760fSMarcelo Tosatti 
2371c902760fSMarcelo Tosatti     return fs.f_bsize;
2372c902760fSMarcelo Tosatti }
2373c902760fSMarcelo Tosatti 
/* Allocate MEMORY bytes of guest RAM for BLOCK from a hugetlbfs mount at
 * PATH, by creating and mmap()ing an unlinked temporary file.  On success
 * returns the mapped area and stores the open fd in block->fd; returns
 * NULL on any failure. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* A request smaller than one huge page cannot be backed here. */
    if (memory < hpagesize) {
        return NULL;
    }

    /* Without MMU notifiers KVM cannot track this externally-backed RAM. */
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* The fd keeps the storage alive; drop the directory entry now so the
       file is reclaimed automatically when QEMU exits. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
2442c902760fSMarcelo Tosatti #endif
2443c902760fSMarcelo Tosatti 
2444d17b5288SAlex Williamson static ram_addr_t find_ram_offset(ram_addr_t size)
2445d17b5288SAlex Williamson {
244604b16653SAlex Williamson     RAMBlock *block, *next_block;
24473e837b2cSAlex Williamson     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
244804b16653SAlex Williamson 
244904b16653SAlex Williamson     if (QLIST_EMPTY(&ram_list.blocks))
245004b16653SAlex Williamson         return 0;
245104b16653SAlex Williamson 
245204b16653SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2453f15fbc4bSAnthony PERARD         ram_addr_t end, next = RAM_ADDR_MAX;
245404b16653SAlex Williamson 
245504b16653SAlex Williamson         end = block->offset + block->length;
245604b16653SAlex Williamson 
245704b16653SAlex Williamson         QLIST_FOREACH(next_block, &ram_list.blocks, next) {
245804b16653SAlex Williamson             if (next_block->offset >= end) {
245904b16653SAlex Williamson                 next = MIN(next, next_block->offset);
246004b16653SAlex Williamson             }
246104b16653SAlex Williamson         }
246204b16653SAlex Williamson         if (next - end >= size && next - end < mingap) {
246304b16653SAlex Williamson             offset = end;
246404b16653SAlex Williamson             mingap = next - end;
246504b16653SAlex Williamson         }
246604b16653SAlex Williamson     }
24673e837b2cSAlex Williamson 
24683e837b2cSAlex Williamson     if (offset == RAM_ADDR_MAX) {
24693e837b2cSAlex Williamson         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
24703e837b2cSAlex Williamson                 (uint64_t)size);
24713e837b2cSAlex Williamson         abort();
24723e837b2cSAlex Williamson     }
24733e837b2cSAlex Williamson 
247404b16653SAlex Williamson     return offset;
247504b16653SAlex Williamson }
247604b16653SAlex Williamson 
247704b16653SAlex Williamson static ram_addr_t last_ram_offset(void)
247804b16653SAlex Williamson {
2479d17b5288SAlex Williamson     RAMBlock *block;
2480d17b5288SAlex Williamson     ram_addr_t last = 0;
2481d17b5288SAlex Williamson 
2482d17b5288SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next)
2483d17b5288SAlex Williamson         last = MAX(last, block->offset + block->length);
2484d17b5288SAlex Williamson 
2485d17b5288SAlex Williamson     return last;
2486d17b5288SAlex Williamson }
2487d17b5288SAlex Williamson 
2488ddb97f1dSJason Baron static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2489ddb97f1dSJason Baron {
2490ddb97f1dSJason Baron     int ret;
2491ddb97f1dSJason Baron     QemuOpts *machine_opts;
2492ddb97f1dSJason Baron 
2493ddb97f1dSJason Baron     /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2494ddb97f1dSJason Baron     machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2495ddb97f1dSJason Baron     if (machine_opts &&
2496ddb97f1dSJason Baron         !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2497ddb97f1dSJason Baron         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2498ddb97f1dSJason Baron         if (ret) {
2499ddb97f1dSJason Baron             perror("qemu_madvise");
2500ddb97f1dSJason Baron             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2501ddb97f1dSJason Baron                             "but dump_guest_core=off specified\n");
2502ddb97f1dSJason Baron         }
2503ddb97f1dSJason Baron     }
2504ddb97f1dSJason Baron }
2505ddb97f1dSJason Baron 
2506c5705a77SAvi Kivity void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
250784b89d78SCam Macdonell {
250884b89d78SCam Macdonell     RAMBlock *new_block, *block;
250984b89d78SCam Macdonell 
2510c5705a77SAvi Kivity     new_block = NULL;
2511c5705a77SAvi Kivity     QLIST_FOREACH(block, &ram_list.blocks, next) {
2512c5705a77SAvi Kivity         if (block->offset == addr) {
2513c5705a77SAvi Kivity             new_block = block;
2514c5705a77SAvi Kivity             break;
2515c5705a77SAvi Kivity         }
2516c5705a77SAvi Kivity     }
2517c5705a77SAvi Kivity     assert(new_block);
2518c5705a77SAvi Kivity     assert(!new_block->idstr[0]);
251984b89d78SCam Macdonell 
252009e5ab63SAnthony Liguori     if (dev) {
252109e5ab63SAnthony Liguori         char *id = qdev_get_dev_path(dev);
252284b89d78SCam Macdonell         if (id) {
252384b89d78SCam Macdonell             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
25247267c094SAnthony Liguori             g_free(id);
252584b89d78SCam Macdonell         }
252684b89d78SCam Macdonell     }
252784b89d78SCam Macdonell     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
252884b89d78SCam Macdonell 
252984b89d78SCam Macdonell     QLIST_FOREACH(block, &ram_list.blocks, next) {
2530c5705a77SAvi Kivity         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
253184b89d78SCam Macdonell             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
253284b89d78SCam Macdonell                     new_block->idstr);
253384b89d78SCam Macdonell             abort();
253484b89d78SCam Macdonell         }
253584b89d78SCam Macdonell     }
2536c5705a77SAvi Kivity }
2537c5705a77SAvi Kivity 
25388490fc78SLuiz Capitulino static int memory_try_enable_merging(void *addr, size_t len)
25398490fc78SLuiz Capitulino {
25408490fc78SLuiz Capitulino     QemuOpts *opts;
25418490fc78SLuiz Capitulino 
25428490fc78SLuiz Capitulino     opts = qemu_opts_find(qemu_find_opts("machine"), 0);
25438490fc78SLuiz Capitulino     if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
25448490fc78SLuiz Capitulino         /* disabled by the user */
25458490fc78SLuiz Capitulino         return 0;
25468490fc78SLuiz Capitulino     }
25478490fc78SLuiz Capitulino 
25488490fc78SLuiz Capitulino     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
25498490fc78SLuiz Capitulino }
25508490fc78SLuiz Capitulino 
/* Register a new RAM block of @size bytes belonging to @mr.  If @host is
 * non-NULL the caller supplies (and keeps ownership of) the backing
 * memory; otherwise the backing is allocated here via hugetlbfs
 * (-mem-path), Xen, KVM, or plain qemu_vmalloc().  Returns the block's
 * offset in the ram_addr_t space. */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        /* Remember the memory is caller-owned so qemu_ram_free() never
         * tries to release it. */
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs allocation failed; fall back to anonymous
                 * memory. */
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                /* Xen owns the memory: new_block->host stays NULL and is
                 * mapped lazily via the map cache. */
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the global dirty bitmap to cover the new block and mark the
     * whole block dirty. */
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2605e9a1ab19Sbellard 
/* Allocate a new RAM block of @size bytes for @mr with QEMU-provided
 * backing memory; convenience wrapper for qemu_ram_alloc_from_ptr()
 * with host == NULL. */
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
26106977dfe6SYoshiaki Tamura 
26111f2e98b6SAlex Williamson void qemu_ram_free_from_ptr(ram_addr_t addr)
26121f2e98b6SAlex Williamson {
26131f2e98b6SAlex Williamson     RAMBlock *block;
26141f2e98b6SAlex Williamson 
26151f2e98b6SAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
26161f2e98b6SAlex Williamson         if (addr == block->offset) {
26171f2e98b6SAlex Williamson             QLIST_REMOVE(block, next);
26187267c094SAnthony Liguori             g_free(block);
26191f2e98b6SAlex Williamson             return;
26201f2e98b6SAlex Williamson         }
26211f2e98b6SAlex Williamson     }
26221f2e98b6SAlex Williamson }
26231f2e98b6SAlex Williamson 
/* Unregister the RAM block starting at @addr and release its backing
 * storage with whichever mechanism allocated it (hugetlbfs mmap, Xen
 * map cache, or qemu_vmalloc).  A miss is silently ignored. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller-provided memory (qemu_ram_alloc_from_ptr):
                 * the caller owns it, nothing to free here. */
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* hugetlbfs-backed: undo file_ram_alloc()'s mmap. */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* file_ram_alloc() fell back to qemu_vmalloc(). */
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}
2661e9a1ab19Sbellard 
#ifndef _WIN32
/* Re-establish the host mapping for guest RAM [addr, addr + length).
 * The old pages are munmap()ed and the range is re-mmap()ed MAP_FIXED
 * at the same virtual address, using the same kind of backing the
 * block was created with.  NOTE(review): looks intended for recovering
 * from poisoned host pages (e.g. hardware memory errors) — confirm
 * with callers. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller-owned memory: we must not remap it. */
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
                        /* File-backed (hugetlbfs): remap from the same
                         * file at the same offset. */
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                /* Re-apply per-range madvise settings lost with the old
                 * mapping. */
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
2723cd19cfa2SHuang Ying 
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list so repeated
             * lookups of the same block stay fast (MRU order). */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
2764dc828ca1Spbrook 
2765b2e0a138SMichael S. Tsirkin /* Return a host pointer to ram allocated with qemu_ram_alloc.
2766b2e0a138SMichael S. Tsirkin  * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2767b2e0a138SMichael S. Tsirkin  */
2768b2e0a138SMichael S. Tsirkin void *qemu_safe_ram_ptr(ram_addr_t addr)
2769b2e0a138SMichael S. Tsirkin {
2770b2e0a138SMichael S. Tsirkin     RAMBlock *block;
2771b2e0a138SMichael S. Tsirkin 
2772b2e0a138SMichael S. Tsirkin     QLIST_FOREACH(block, &ram_list.blocks, next) {
2773b2e0a138SMichael S. Tsirkin         if (addr - block->offset < block->length) {
2774868bb33fSJan Kiszka             if (xen_enabled()) {
2775432d268cSJun Nakajima                 /* We need to check if the requested address is in the RAM
2776432d268cSJun Nakajima                  * because we don't want to map the entire memory in QEMU.
2777712c2b41SStefano Stabellini                  * In that case just map until the end of the page.
2778432d268cSJun Nakajima                  */
2779432d268cSJun Nakajima                 if (block->offset == 0) {
2780e41d7c69SJan Kiszka                     return xen_map_cache(addr, 0, 0);
2781432d268cSJun Nakajima                 } else if (block->host == NULL) {
2782e41d7c69SJan Kiszka                     block->host =
2783e41d7c69SJan Kiszka                         xen_map_cache(block->offset, block->length, 1);
2784432d268cSJun Nakajima                 }
2785432d268cSJun Nakajima             }
2786b2e0a138SMichael S. Tsirkin             return block->host + (addr - block->offset);
2787b2e0a138SMichael S. Tsirkin         }
2788b2e0a138SMichael S. Tsirkin     }
2789b2e0a138SMichael S. Tsirkin 
2790b2e0a138SMichael S. Tsirkin     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2791b2e0a138SMichael S. Tsirkin     abort();
2792b2e0a138SMichael S. Tsirkin 
2793b2e0a138SMichael S. Tsirkin     return NULL;
2794b2e0a138SMichael S. Tsirkin }
2795b2e0a138SMichael S. Tsirkin 
279638bee5dcSStefano Stabellini /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
279738bee5dcSStefano Stabellini  * but takes a size argument */
27988ab934f9SStefano Stabellini void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
279938bee5dcSStefano Stabellini {
28008ab934f9SStefano Stabellini     if (*size == 0) {
28018ab934f9SStefano Stabellini         return NULL;
28028ab934f9SStefano Stabellini     }
2803868bb33fSJan Kiszka     if (xen_enabled()) {
2804e41d7c69SJan Kiszka         return xen_map_cache(addr, *size, 1);
2805868bb33fSJan Kiszka     } else {
280638bee5dcSStefano Stabellini         RAMBlock *block;
280738bee5dcSStefano Stabellini 
280838bee5dcSStefano Stabellini         QLIST_FOREACH(block, &ram_list.blocks, next) {
280938bee5dcSStefano Stabellini             if (addr - block->offset < block->length) {
281038bee5dcSStefano Stabellini                 if (addr - block->offset + *size > block->length)
281138bee5dcSStefano Stabellini                     *size = block->length - addr + block->offset;
281238bee5dcSStefano Stabellini                 return block->host + (addr - block->offset);
281338bee5dcSStefano Stabellini             }
281438bee5dcSStefano Stabellini         }
281538bee5dcSStefano Stabellini 
281638bee5dcSStefano Stabellini         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
281738bee5dcSStefano Stabellini         abort();
281838bee5dcSStefano Stabellini     }
281938bee5dcSStefano Stabellini }
282038bee5dcSStefano Stabellini 
/* Release a pointer obtained from qemu_get_ram_ptr()/qemu_ram_ptr_length().
 * Currently only emits a tracepoint; callers should still balance every
 * get with a put. */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
2825050a0ddfSAnthony PERARD 
2826e890261fSMarcelo Tosatti int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
28275579c7f3Spbrook {
282894a6b54fSpbrook     RAMBlock *block;
282994a6b54fSpbrook     uint8_t *host = ptr;
283094a6b54fSpbrook 
2831868bb33fSJan Kiszka     if (xen_enabled()) {
2832e41d7c69SJan Kiszka         *ram_addr = xen_ram_addr_from_mapcache(ptr);
2833712c2b41SStefano Stabellini         return 0;
2834712c2b41SStefano Stabellini     }
2835712c2b41SStefano Stabellini 
2836f471a17eSAlex Williamson     QLIST_FOREACH(block, &ram_list.blocks, next) {
2837432d268cSJun Nakajima         /* This case append when the block is not mapped. */
2838432d268cSJun Nakajima         if (block->host == NULL) {
2839432d268cSJun Nakajima             continue;
2840432d268cSJun Nakajima         }
2841f471a17eSAlex Williamson         if (host - block->host < block->length) {
2842e890261fSMarcelo Tosatti             *ram_addr = block->offset + (host - block->host);
2843e890261fSMarcelo Tosatti             return 0;
284494a6b54fSpbrook         }
2845f471a17eSAlex Williamson     }
2846432d268cSJun Nakajima 
2847e890261fSMarcelo Tosatti     return -1;
2848e890261fSMarcelo Tosatti }
2849f471a17eSAlex Williamson 
2850e890261fSMarcelo Tosatti /* Some of the softmmu routines need to translate from a host pointer
2851e890261fSMarcelo Tosatti    (typically a TLB entry) back to a ram offset.  */
2852e890261fSMarcelo Tosatti ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2853e890261fSMarcelo Tosatti {
2854e890261fSMarcelo Tosatti     ram_addr_t ram_addr;
2855e890261fSMarcelo Tosatti 
2856e890261fSMarcelo Tosatti     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
285794a6b54fSpbrook         fprintf(stderr, "Bad ram pointer %p\n", ptr);
285894a6b54fSpbrook         abort();
2859e890261fSMarcelo Tosatti     }
2860e890261fSMarcelo Tosatti     return ram_addr;
28615579c7f3Spbrook }
28625579c7f3Spbrook 
/* Read handler for unassigned physical addresses: on targets that model
 * unassigned-access faults, report the access to the CPU; the read
 * itself returns 0. */
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}
2874e18231a3Sblueswir1 
/* Write handler for unassigned physical addresses: on targets that model
 * unassigned-access faults, report the access to the CPU; the written
 * value is discarded. */
static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}
2885e18231a3Sblueswir1 
/* Ops for addresses with nothing mapped: reads return 0, writes are
 * dropped (both may raise an unassigned-access fault first). */
static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
289133417e70Sbellard 
/* Read handler for regions whose reads must never reach the slow path
 * (see error_mem_ops/rom_mem_ops/notdirty_mem_ops); reaching it is a
 * bug, so abort. */
static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}
28970e0df1e2SAvi Kivity 
/* Write handler counterpart of error_mem_read: must never be invoked,
 * so abort. */
static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}
29030e0df1e2SAvi Kivity 
/* Ops for regions that must never be accessed through the I/O path;
 * any access aborts. */
static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
290933417e70Sbellard 
/* Ops for ROM: writes are silently discarded like unassigned memory;
 * reads abort via error_mem_read (they are not expected to come through
 * this path — presumably served directly from RAM; verify against the
 * dispatch code). */
static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
29150e0df1e2SAvi Kivity 
/* Slow-path write handler for RAM pages whose dirty bits are not all
 * set: performs the store, invalidates translated code on the page,
 * updates the dirty flags, and re-enables the fast path once the page
 * is fully dirty. */
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        /* Translated code may exist for this page: invalidate it before
         * the store so self-modifying code gets retranslated. */
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    /* Perform the actual store into host memory. */
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
29471ccde1cbSbellard 
/* Ops for the notdirty slow path: only writes go through it (to track
 * dirtying); a read landing here is a bug and aborts. */
static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
29531ccde1cbSbellard 
/* Generate a debug exception if a watchpoint has been hit.
 * @offset: page offset of the access (combined with env->mem_io_vaddr)
 * @len_mask: mask derived from the access size
 * @flags: BP_MEM_READ / BP_MEM_WRITE, matched against each watchpoint */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                /* Recover the guest CPU state at the faulting access by
                 * finding and restoring from the current TB. */
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    /* Regenerate a single-instruction TB and restart so
                     * the access completes, then re-enter this check
                     * (handled by the watchpoint_hit path above). */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
29990f459d16Spbrook 
30006658ffb8Spbrook /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
30016658ffb8Spbrook    so these check for a hit then pass through to the normal out-of-line
30026658ffb8Spbrook    phys routines.  */
30031ec9b909SAvi Kivity static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
30041ec9b909SAvi Kivity                                unsigned size)
30056658ffb8Spbrook {
30061ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
30071ec9b909SAvi Kivity     switch (size) {
30081ec9b909SAvi Kivity     case 1: return ldub_phys(addr);
30091ec9b909SAvi Kivity     case 2: return lduw_phys(addr);
30101ec9b909SAvi Kivity     case 4: return ldl_phys(addr);
30111ec9b909SAvi Kivity     default: abort();
30121ec9b909SAvi Kivity     }
30136658ffb8Spbrook }
30146658ffb8Spbrook 
30151ec9b909SAvi Kivity static void watch_mem_write(void *opaque, target_phys_addr_t addr,
30161ec9b909SAvi Kivity                             uint64_t val, unsigned size)
30176658ffb8Spbrook {
30181ec9b909SAvi Kivity     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
30191ec9b909SAvi Kivity     switch (size) {
302067364150SMax Filippov     case 1:
302167364150SMax Filippov         stb_phys(addr, val);
302267364150SMax Filippov         break;
302367364150SMax Filippov     case 2:
302467364150SMax Filippov         stw_phys(addr, val);
302567364150SMax Filippov         break;
302667364150SMax Filippov     case 4:
302767364150SMax Filippov         stl_phys(addr, val);
302867364150SMax Filippov         break;
30291ec9b909SAvi Kivity     default: abort();
30301ec9b909SAvi Kivity     }
30316658ffb8Spbrook }
30326658ffb8Spbrook 
/* MemoryRegionOps for pages covered by a watchpoint; every access is
   funnelled through the check-then-forward handlers above.  */
static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
30386658ffb8Spbrook 
303970c68e44SAvi Kivity static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
304070c68e44SAvi Kivity                              unsigned len)
3041db7b5426Sblueswir1 {
304270c68e44SAvi Kivity     subpage_t *mmio = opaque;
3043f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
30445312bd8bSAvi Kivity     MemoryRegionSection *section;
3045db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3046db7b5426Sblueswir1     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3047db7b5426Sblueswir1            mmio, len, addr, idx);
3048db7b5426Sblueswir1 #endif
3049db7b5426Sblueswir1 
30505312bd8bSAvi Kivity     section = &phys_sections[mmio->sub_section[idx]];
30515312bd8bSAvi Kivity     addr += mmio->base;
30525312bd8bSAvi Kivity     addr -= section->offset_within_address_space;
30535312bd8bSAvi Kivity     addr += section->offset_within_region;
305437ec01d4SAvi Kivity     return io_mem_read(section->mr, addr, len);
3055db7b5426Sblueswir1 }
3056db7b5426Sblueswir1 
305770c68e44SAvi Kivity static void subpage_write(void *opaque, target_phys_addr_t addr,
305870c68e44SAvi Kivity                           uint64_t value, unsigned len)
3059db7b5426Sblueswir1 {
306070c68e44SAvi Kivity     subpage_t *mmio = opaque;
3061f6405247SRichard Henderson     unsigned int idx = SUBPAGE_IDX(addr);
30625312bd8bSAvi Kivity     MemoryRegionSection *section;
3063db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
306470c68e44SAvi Kivity     printf("%s: subpage %p len %d addr " TARGET_FMT_plx
306570c68e44SAvi Kivity            " idx %d value %"PRIx64"\n",
3066f6405247SRichard Henderson            __func__, mmio, len, addr, idx, value);
3067db7b5426Sblueswir1 #endif
3068f6405247SRichard Henderson 
30695312bd8bSAvi Kivity     section = &phys_sections[mmio->sub_section[idx]];
30705312bd8bSAvi Kivity     addr += mmio->base;
30715312bd8bSAvi Kivity     addr -= section->offset_within_address_space;
30725312bd8bSAvi Kivity     addr += section->offset_within_region;
307337ec01d4SAvi Kivity     io_mem_write(section->mr, addr, value, len);
3074db7b5426Sblueswir1 }
3075db7b5426Sblueswir1 
/* MemoryRegionOps for pages that contain more than one section;
   accesses are demultiplexed by subpage_read/subpage_write.  */
static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
</style_update>
3081db7b5426Sblueswir1 
3082de712f94SAvi Kivity static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3083de712f94SAvi Kivity                                  unsigned size)
308456384e8bSAndreas Färber {
308556384e8bSAndreas Färber     ram_addr_t raddr = addr;
308656384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
3087de712f94SAvi Kivity     switch (size) {
3088de712f94SAvi Kivity     case 1: return ldub_p(ptr);
3089de712f94SAvi Kivity     case 2: return lduw_p(ptr);
3090de712f94SAvi Kivity     case 4: return ldl_p(ptr);
3091de712f94SAvi Kivity     default: abort();
3092de712f94SAvi Kivity     }
309356384e8bSAndreas Färber }
309456384e8bSAndreas Färber 
3095de712f94SAvi Kivity static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3096de712f94SAvi Kivity                               uint64_t value, unsigned size)
309756384e8bSAndreas Färber {
309856384e8bSAndreas Färber     ram_addr_t raddr = addr;
309956384e8bSAndreas Färber     void *ptr = qemu_get_ram_ptr(raddr);
3100de712f94SAvi Kivity     switch (size) {
3101de712f94SAvi Kivity     case 1: return stb_p(ptr, value);
3102de712f94SAvi Kivity     case 2: return stw_p(ptr, value);
3103de712f94SAvi Kivity     case 4: return stl_p(ptr, value);
3104de712f94SAvi Kivity     default: abort();
3105de712f94SAvi Kivity     }
310656384e8bSAndreas Färber }
310756384e8bSAndreas Färber 
/* MemoryRegionOps used when a RAM section is registered inside a
   subpage (see subpage_register): direct host-pointer access.  */
static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
311356384e8bSAndreas Färber 
3114c227f099SAnthony Liguori static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
31155312bd8bSAvi Kivity                              uint16_t section)
3116db7b5426Sblueswir1 {
3117db7b5426Sblueswir1     int idx, eidx;
3118db7b5426Sblueswir1 
3119db7b5426Sblueswir1     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3120db7b5426Sblueswir1         return -1;
3121db7b5426Sblueswir1     idx = SUBPAGE_IDX(start);
3122db7b5426Sblueswir1     eidx = SUBPAGE_IDX(end);
3123db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
31240bf9e31aSBlue Swirl     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3125db7b5426Sblueswir1            mmio, start, end, idx, eidx, memory);
3126db7b5426Sblueswir1 #endif
31275312bd8bSAvi Kivity     if (memory_region_is_ram(phys_sections[section].mr)) {
31285312bd8bSAvi Kivity         MemoryRegionSection new_section = phys_sections[section];
31295312bd8bSAvi Kivity         new_section.mr = &io_mem_subpage_ram;
31305312bd8bSAvi Kivity         section = phys_section_add(&new_section);
313156384e8bSAndreas Färber     }
3132db7b5426Sblueswir1     for (; idx <= eidx; idx++) {
31335312bd8bSAvi Kivity         mmio->sub_section[idx] = section;
3134db7b5426Sblueswir1     }
3135db7b5426Sblueswir1 
3136db7b5426Sblueswir1     return 0;
3137db7b5426Sblueswir1 }
3138db7b5426Sblueswir1 
31390f0cb164SAvi Kivity static subpage_t *subpage_init(target_phys_addr_t base)
3140db7b5426Sblueswir1 {
3141c227f099SAnthony Liguori     subpage_t *mmio;
3142db7b5426Sblueswir1 
31437267c094SAnthony Liguori     mmio = g_malloc0(sizeof(subpage_t));
31441eec614bSaliguori 
3145db7b5426Sblueswir1     mmio->base = base;
314670c68e44SAvi Kivity     memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
314770c68e44SAvi Kivity                           "subpage", TARGET_PAGE_SIZE);
3148b3b00c78SAvi Kivity     mmio->iomem.subpage = true;
3149db7b5426Sblueswir1 #if defined(DEBUG_SUBPAGE)
3150db7b5426Sblueswir1     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3151db7b5426Sblueswir1            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3152db7b5426Sblueswir1 #endif
31530f0cb164SAvi Kivity     subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
3154db7b5426Sblueswir1 
3155db7b5426Sblueswir1     return mmio;
3156db7b5426Sblueswir1 }
3157db7b5426Sblueswir1 
31585312bd8bSAvi Kivity static uint16_t dummy_section(MemoryRegion *mr)
31595312bd8bSAvi Kivity {
31605312bd8bSAvi Kivity     MemoryRegionSection section = {
31615312bd8bSAvi Kivity         .mr = mr,
31625312bd8bSAvi Kivity         .offset_within_address_space = 0,
31635312bd8bSAvi Kivity         .offset_within_region = 0,
31645312bd8bSAvi Kivity         .size = UINT64_MAX,
31655312bd8bSAvi Kivity     };
31665312bd8bSAvi Kivity 
31675312bd8bSAvi Kivity     return phys_section_add(&section);
31685312bd8bSAvi Kivity }
31695312bd8bSAvi Kivity 
/* Map an IOTLB value back to the MemoryRegion it refers to: the
   section index is stored in the sub-page bits of the entry.  */
MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
3174aa102231SAvi Kivity 
/* Initialize the fixed internal I/O regions used by the memory core:
   ram/rom fast-path markers, the unassigned-access catcher, dirty
   tracking (notdirty), subpage RAM access and watchpoint handling.  */
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
3188e9179ce1SAvi Kivity 
/* Start of a memory topology update: throw away all existing mappings
   and sections and recreate the special dummy sections.  The sections
   must be re-added in this order so the well-known indices
   (unassigned/notdirty/rom/watch) stay valid.  */
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}
319950c1e149SAvi Kivity 
320050c1e149SAvi Kivity static void core_commit(MemoryListener *listener)
320150c1e149SAvi Kivity {
32029349b4f9SAndreas Färber     CPUArchState *env;
3203117712c3SAvi Kivity 
3204117712c3SAvi Kivity     /* since each CPU stores ram addresses in its TLB cache, we must
3205117712c3SAvi Kivity        reset the modified entries */
3206117712c3SAvi Kivity     /* XXX: slow ! */
3207117712c3SAvi Kivity     for(env = first_cpu; env != NULL; env = env->next_cpu) {
3208117712c3SAvi Kivity         tlb_flush(env, 1);
3209117712c3SAvi Kivity     }
321050c1e149SAvi Kivity }
321150c1e149SAvi Kivity 
/* A section was added to the address space: enter it into the
   physical page map.  */
static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}
321793632747SAvi Kivity 
/* Removal needs no work: core_begin() already cleared the whole map,
   so deleted sections are simply never re-added.  */
static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

/* Unchanged sections must still be re-registered because the map was
   rebuilt from scratch in core_begin().  */
static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}
322850c1e149SAvi Kivity 
/* Per-section dirty-logging transitions: no-ops here — the core
   listener only reacts to global dirty tracking below.  */
static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

/* Global dirty tracking is switched on/off for the whole machine.  */
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

/* eventfd binding: nothing to do in the core listener.  */
static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}
326593632747SAvi Kivity 
/* The ioport table is updated directly by io_region_add/del, so no
   work is required at transaction boundaries.  */
static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}
327350c1e149SAvi Kivity 
32744855d41aSAvi Kivity static void io_region_add(MemoryListener *listener,
32754855d41aSAvi Kivity                           MemoryRegionSection *section)
32764855d41aSAvi Kivity {
3277a2d33521SAvi Kivity     MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3278a2d33521SAvi Kivity 
3279a2d33521SAvi Kivity     mrio->mr = section->mr;
3280a2d33521SAvi Kivity     mrio->offset = section->offset_within_region;
3281a2d33521SAvi Kivity     iorange_init(&mrio->iorange, &memory_region_iorange_ops,
32824855d41aSAvi Kivity                  section->offset_within_address_space, section->size);
3283a2d33521SAvi Kivity     ioport_register(&mrio->iorange);
32844855d41aSAvi Kivity }
32854855d41aSAvi Kivity 
/* A section disappeared from the I/O address space: release the
   corresponding legacy ioport range.  */
static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}
32914855d41aSAvi Kivity 
/* The remaining callbacks are no-ops for the I/O address space:
   dirty logging and eventfd binding are not handled here.  */
static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}
33314855d41aSAvi Kivity 
/* Listener that mirrors the flat view of system memory into the
   physical page tables used by the rest of this file.  */
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};
334793632747SAvi Kivity 
/* Listener that mirrors the I/O address space into the legacy ioport
   table; most callbacks are no-ops.  */
static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};
33634855d41aSAvi Kivity 
/* Create the root "system" (memory) and "io" address spaces and
   attach the core and io listeners so this file tracks them.  */
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}
337762152b8aSAvi Kivity 
/* Accessor for the root region of the system memory address space.  */
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

/* Accessor for the root region of the system I/O address space.  */
MemoryRegion *get_system_io(void)
{
    return system_io;
}
3387309cb471SAvi Kivity 
3388e2eef170Spbrook #endif /* !defined(CONFIG_USER_ONLY) */
3389e2eef170Spbrook 
339013eb76e0Sbellard /* physical memory access (slow version, mainly for debug) */
339113eb76e0Sbellard #if defined(CONFIG_USER_ONLY)
33929349b4f9SAndreas Färber int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
3393a68fe89cSPaul Brook                         uint8_t *buf, int len, int is_write)
339413eb76e0Sbellard {
339513eb76e0Sbellard     int l, flags;
339613eb76e0Sbellard     target_ulong page;
339753a5960aSpbrook     void * p;
339813eb76e0Sbellard 
339913eb76e0Sbellard     while (len > 0) {
340013eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
340113eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
340213eb76e0Sbellard         if (l > len)
340313eb76e0Sbellard             l = len;
340413eb76e0Sbellard         flags = page_get_flags(page);
340513eb76e0Sbellard         if (!(flags & PAGE_VALID))
3406a68fe89cSPaul Brook             return -1;
340713eb76e0Sbellard         if (is_write) {
340813eb76e0Sbellard             if (!(flags & PAGE_WRITE))
3409a68fe89cSPaul Brook                 return -1;
3410579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
341172fb7daaSaurel32             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3412a68fe89cSPaul Brook                 return -1;
341372fb7daaSaurel32             memcpy(p, buf, l);
341472fb7daaSaurel32             unlock_user(p, addr, l);
341513eb76e0Sbellard         } else {
341613eb76e0Sbellard             if (!(flags & PAGE_READ))
3417a68fe89cSPaul Brook                 return -1;
3418579a97f7Sbellard             /* XXX: this code should not depend on lock_user */
341972fb7daaSaurel32             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3420a68fe89cSPaul Brook                 return -1;
342172fb7daaSaurel32             memcpy(buf, p, l);
34225b257578Saurel32             unlock_user(p, addr, 0);
342313eb76e0Sbellard         }
342413eb76e0Sbellard         len -= l;
342513eb76e0Sbellard         buf += l;
342613eb76e0Sbellard         addr += l;
342713eb76e0Sbellard     }
3428a68fe89cSPaul Brook     return 0;
342913eb76e0Sbellard }
34308df1cd07Sbellard 
343113eb76e0Sbellard #else
343251d7a9ebSAnthony PERARD 
/* After a direct write to RAM at [addr, addr + length): if the range
   was clean, invalidate any translated code derived from it and set
   all dirty flags except CODE_DIRTY_FLAG (which the tb invalidation
   path maintains).  Xen is always notified of the modification.  */
static void invalidate_and_set_dirty(target_phys_addr_t addr,
                                     target_phys_addr_t length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
344451d7a9ebSAnthony PERARD 
/* Copy 'len' bytes between guest physical memory at 'addr' and 'buf';
   'is_write' selects the direction (non-zero: buf -> guest).  The
   transfer is split at page boundaries.  RAM pages are accessed by
   memcpy through the host mapping (with code invalidation / dirty
   marking on writes); everything else goes through io_mem_read/
   io_mem_write using the widest naturally aligned width (4/2/1).
   Writes to read-only RAM sections are silently dropped.  */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* do not cross a page boundary in a single chunk */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                /* keep translated code and dirty bitmap coherent */
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
35298df1cd07Sbellard 
3530d0ecd2aaSbellard /* used for ROM loading : can write in RAM and ROM */
3531c227f099SAnthony Liguori void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3532d0ecd2aaSbellard                                    const uint8_t *buf, int len)
3533d0ecd2aaSbellard {
3534d0ecd2aaSbellard     int l;
3535d0ecd2aaSbellard     uint8_t *ptr;
3536c227f099SAnthony Liguori     target_phys_addr_t page;
3537f3705d53SAvi Kivity     MemoryRegionSection *section;
3538d0ecd2aaSbellard 
3539d0ecd2aaSbellard     while (len > 0) {
3540d0ecd2aaSbellard         page = addr & TARGET_PAGE_MASK;
3541d0ecd2aaSbellard         l = (page + TARGET_PAGE_SIZE) - addr;
3542d0ecd2aaSbellard         if (l > len)
3543d0ecd2aaSbellard             l = len;
354406ef3525SAvi Kivity         section = phys_page_find(page >> TARGET_PAGE_BITS);
3545d0ecd2aaSbellard 
3546cc5bea60SBlue Swirl         if (!(memory_region_is_ram(section->mr) ||
3547cc5bea60SBlue Swirl               memory_region_is_romd(section->mr))) {
3548d0ecd2aaSbellard             /* do nothing */
3549d0ecd2aaSbellard         } else {
3550d0ecd2aaSbellard             unsigned long addr1;
3551f3705d53SAvi Kivity             addr1 = memory_region_get_ram_addr(section->mr)
3552cc5bea60SBlue Swirl                 + memory_region_section_addr(section, addr);
3553d0ecd2aaSbellard             /* ROM/RAM case */
35545579c7f3Spbrook             ptr = qemu_get_ram_ptr(addr1);
3555d0ecd2aaSbellard             memcpy(ptr, buf, l);
355651d7a9ebSAnthony PERARD             invalidate_and_set_dirty(addr1, l);
3557050a0ddfSAnthony PERARD             qemu_put_ram_ptr(ptr);
3558d0ecd2aaSbellard         }
3559d0ecd2aaSbellard         len -= l;
3560d0ecd2aaSbellard         buf += l;
3561d0ecd2aaSbellard         addr += l;
3562d0ecd2aaSbellard     }
3563d0ecd2aaSbellard }
3564d0ecd2aaSbellard 
/* Bounce buffer used by cpu_physical_memory_map() when the target is
   not directly mappable RAM.  There is a single static instance, so
   only one bounce mapping can be outstanding at a time; waiters are
   notified through the map-client list below.  */
typedef struct {
    void *buffer;             /* host buffer; NULL while unused */
    target_phys_addr_t addr;  /* guest physical address being mapped */
    target_phys_addr_t len;   /* length of the mapped range */
} BounceBuffer;

static BounceBuffer bounce;
35726d16c2f8Saliguori 
/* A client waiting to retry cpu_physical_memory_map(); its callback is
   invoked (once) when the bounce buffer becomes available again.  */
typedef struct MapClient {
    void *opaque;                      /* passed back to the callback */
    void (*callback)(void *opaque);    /* retry notification */
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3581ba223c29Saliguori 
3582ba223c29Saliguori void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3583ba223c29Saliguori {
35847267c094SAnthony Liguori     MapClient *client = g_malloc(sizeof(*client));
3585ba223c29Saliguori 
3586ba223c29Saliguori     client->opaque = opaque;
3587ba223c29Saliguori     client->callback = callback;
358872cf2d4fSBlue Swirl     QLIST_INSERT_HEAD(&map_client_list, client, link);
3589ba223c29Saliguori     return client;
3590ba223c29Saliguori }
3591ba223c29Saliguori 
3592ba223c29Saliguori void cpu_unregister_map_client(void *_client)
3593ba223c29Saliguori {
3594ba223c29Saliguori     MapClient *client = (MapClient *)_client;
3595ba223c29Saliguori 
359672cf2d4fSBlue Swirl     QLIST_REMOVE(client, link);
35977267c094SAnthony Liguori     g_free(client);
3598ba223c29Saliguori }
3599ba223c29Saliguori 
3600ba223c29Saliguori static void cpu_notify_map_clients(void)
3601ba223c29Saliguori {
3602ba223c29Saliguori     MapClient *client;
3603ba223c29Saliguori 
360472cf2d4fSBlue Swirl     while (!QLIST_EMPTY(&map_client_list)) {
360572cf2d4fSBlue Swirl         client = QLIST_FIRST(&map_client_list);
3606ba223c29Saliguori         client->callback(client->opaque);
360734d5e948SIsaku Yamahata         cpu_unregister_map_client(client);
3608ba223c29Saliguori     }
3609ba223c29Saliguori }
3610ba223c29Saliguori 
36116d16c2f8Saliguori /* Map a physical memory region into a host virtual address.
36126d16c2f8Saliguori  * May map a subset of the requested range, given by and returned in *plen.
36136d16c2f8Saliguori  * May return NULL if resources needed to perform the mapping are exhausted.
36146d16c2f8Saliguori  * Use only for reads OR writes - not for read-modify-write operations.
3615ba223c29Saliguori  * Use cpu_register_map_client() to know when retrying the map operation is
3616ba223c29Saliguori  * likely to succeed.
36176d16c2f8Saliguori  */
3618c227f099SAnthony Liguori void *cpu_physical_memory_map(target_phys_addr_t addr,
3619c227f099SAnthony Liguori                               target_phys_addr_t *plen,
36206d16c2f8Saliguori                               int is_write)
36216d16c2f8Saliguori {
3622c227f099SAnthony Liguori     target_phys_addr_t len = *plen;
362338bee5dcSStefano Stabellini     target_phys_addr_t todo = 0;
36246d16c2f8Saliguori     int l;
3625c227f099SAnthony Liguori     target_phys_addr_t page;
3626f3705d53SAvi Kivity     MemoryRegionSection *section;
3627f15fbc4bSAnthony PERARD     ram_addr_t raddr = RAM_ADDR_MAX;
36288ab934f9SStefano Stabellini     ram_addr_t rlen;
36298ab934f9SStefano Stabellini     void *ret;
36306d16c2f8Saliguori 
36316d16c2f8Saliguori     while (len > 0) {
36326d16c2f8Saliguori         page = addr & TARGET_PAGE_MASK;
36336d16c2f8Saliguori         l = (page + TARGET_PAGE_SIZE) - addr;
36346d16c2f8Saliguori         if (l > len)
36356d16c2f8Saliguori             l = len;
363606ef3525SAvi Kivity         section = phys_page_find(page >> TARGET_PAGE_BITS);
36376d16c2f8Saliguori 
3638f3705d53SAvi Kivity         if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
363938bee5dcSStefano Stabellini             if (todo || bounce.buffer) {
36406d16c2f8Saliguori                 break;
36416d16c2f8Saliguori             }
36426d16c2f8Saliguori             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
36436d16c2f8Saliguori             bounce.addr = addr;
36446d16c2f8Saliguori             bounce.len = l;
36456d16c2f8Saliguori             if (!is_write) {
364654f7b4a3SStefan Weil                 cpu_physical_memory_read(addr, bounce.buffer, l);
36476d16c2f8Saliguori             }
364838bee5dcSStefano Stabellini 
364938bee5dcSStefano Stabellini             *plen = l;
365038bee5dcSStefano Stabellini             return bounce.buffer;
36516d16c2f8Saliguori         }
36528ab934f9SStefano Stabellini         if (!todo) {
3653f3705d53SAvi Kivity             raddr = memory_region_get_ram_addr(section->mr)
3654cc5bea60SBlue Swirl                 + memory_region_section_addr(section, addr);
36558ab934f9SStefano Stabellini         }
36566d16c2f8Saliguori 
36576d16c2f8Saliguori         len -= l;
36586d16c2f8Saliguori         addr += l;
365938bee5dcSStefano Stabellini         todo += l;
36606d16c2f8Saliguori     }
36618ab934f9SStefano Stabellini     rlen = todo;
36628ab934f9SStefano Stabellini     ret = qemu_ram_ptr_length(raddr, &rlen);
36638ab934f9SStefano Stabellini     *plen = rlen;
36648ab934f9SStefano Stabellini     return ret;
36656d16c2f8Saliguori }
36666d16c2f8Saliguori 
36676d16c2f8Saliguori /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
36686d16c2f8Saliguori  * Will also mark the memory as dirty if is_write == 1.  access_len gives
36696d16c2f8Saliguori  * the amount of memory that was actually read or written by the caller.
36706d16c2f8Saliguori  */
3671c227f099SAnthony Liguori void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3672c227f099SAnthony Liguori                                int is_write, target_phys_addr_t access_len)
36736d16c2f8Saliguori {
36746d16c2f8Saliguori     if (buffer != bounce.buffer) {
36756d16c2f8Saliguori         if (is_write) {
3676e890261fSMarcelo Tosatti             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
36776d16c2f8Saliguori             while (access_len) {
36786d16c2f8Saliguori                 unsigned l;
36796d16c2f8Saliguori                 l = TARGET_PAGE_SIZE;
36806d16c2f8Saliguori                 if (l > access_len)
36816d16c2f8Saliguori                     l = access_len;
368251d7a9ebSAnthony PERARD                 invalidate_and_set_dirty(addr1, l);
36836d16c2f8Saliguori                 addr1 += l;
36846d16c2f8Saliguori                 access_len -= l;
36856d16c2f8Saliguori             }
36866d16c2f8Saliguori         }
3687868bb33fSJan Kiszka         if (xen_enabled()) {
3688e41d7c69SJan Kiszka             xen_invalidate_map_cache_entry(buffer);
3689050a0ddfSAnthony PERARD         }
36906d16c2f8Saliguori         return;
36916d16c2f8Saliguori     }
36926d16c2f8Saliguori     if (is_write) {
36936d16c2f8Saliguori         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
36946d16c2f8Saliguori     }
3695f8a83245SHerve Poussineau     qemu_vfree(bounce.buffer);
36966d16c2f8Saliguori     bounce.buffer = NULL;
3697ba223c29Saliguori     cpu_notify_map_clients();
36986d16c2f8Saliguori }
3699d0ecd2aaSbellard 
37008df1cd07Sbellard /* warning: addr must be aligned */
37011e78bcc1SAlexander Graf static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
37021e78bcc1SAlexander Graf                                          enum device_endian endian)
37038df1cd07Sbellard {
37048df1cd07Sbellard     uint8_t *ptr;
37058df1cd07Sbellard     uint32_t val;
3706f3705d53SAvi Kivity     MemoryRegionSection *section;
37078df1cd07Sbellard 
370806ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
37098df1cd07Sbellard 
3710cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3711cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
37128df1cd07Sbellard         /* I/O case */
3713cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
371437ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4);
37151e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
37161e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
37171e78bcc1SAlexander Graf             val = bswap32(val);
37181e78bcc1SAlexander Graf         }
37191e78bcc1SAlexander Graf #else
37201e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
37211e78bcc1SAlexander Graf             val = bswap32(val);
37221e78bcc1SAlexander Graf         }
37231e78bcc1SAlexander Graf #endif
37248df1cd07Sbellard     } else {
37258df1cd07Sbellard         /* RAM case */
3726f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
372706ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3728cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
37291e78bcc1SAlexander Graf         switch (endian) {
37301e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
37311e78bcc1SAlexander Graf             val = ldl_le_p(ptr);
37321e78bcc1SAlexander Graf             break;
37331e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
37341e78bcc1SAlexander Graf             val = ldl_be_p(ptr);
37351e78bcc1SAlexander Graf             break;
37361e78bcc1SAlexander Graf         default:
37378df1cd07Sbellard             val = ldl_p(ptr);
37381e78bcc1SAlexander Graf             break;
37391e78bcc1SAlexander Graf         }
37408df1cd07Sbellard     }
37418df1cd07Sbellard     return val;
37428df1cd07Sbellard }
37438df1cd07Sbellard 
37441e78bcc1SAlexander Graf uint32_t ldl_phys(target_phys_addr_t addr)
37451e78bcc1SAlexander Graf {
37461e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
37471e78bcc1SAlexander Graf }
37481e78bcc1SAlexander Graf 
37491e78bcc1SAlexander Graf uint32_t ldl_le_phys(target_phys_addr_t addr)
37501e78bcc1SAlexander Graf {
37511e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
37521e78bcc1SAlexander Graf }
37531e78bcc1SAlexander Graf 
37541e78bcc1SAlexander Graf uint32_t ldl_be_phys(target_phys_addr_t addr)
37551e78bcc1SAlexander Graf {
37561e78bcc1SAlexander Graf     return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
37571e78bcc1SAlexander Graf }
37581e78bcc1SAlexander Graf 
375984b7b8e7Sbellard /* warning: addr must be aligned */
37601e78bcc1SAlexander Graf static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
37611e78bcc1SAlexander Graf                                          enum device_endian endian)
376284b7b8e7Sbellard {
376384b7b8e7Sbellard     uint8_t *ptr;
376484b7b8e7Sbellard     uint64_t val;
3765f3705d53SAvi Kivity     MemoryRegionSection *section;
376684b7b8e7Sbellard 
376706ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
376884b7b8e7Sbellard 
3769cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3770cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
377184b7b8e7Sbellard         /* I/O case */
3772cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
37731e78bcc1SAlexander Graf 
37741e78bcc1SAlexander Graf         /* XXX This is broken when device endian != cpu endian.
37751e78bcc1SAlexander Graf                Fix and add "endian" variable check */
377684b7b8e7Sbellard #ifdef TARGET_WORDS_BIGENDIAN
377737ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4) << 32;
377837ec01d4SAvi Kivity         val |= io_mem_read(section->mr, addr + 4, 4);
377984b7b8e7Sbellard #else
378037ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 4);
378137ec01d4SAvi Kivity         val |= io_mem_read(section->mr, addr + 4, 4) << 32;
378284b7b8e7Sbellard #endif
378384b7b8e7Sbellard     } else {
378484b7b8e7Sbellard         /* RAM case */
3785f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
378606ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3787cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
37881e78bcc1SAlexander Graf         switch (endian) {
37891e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
37901e78bcc1SAlexander Graf             val = ldq_le_p(ptr);
37911e78bcc1SAlexander Graf             break;
37921e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
37931e78bcc1SAlexander Graf             val = ldq_be_p(ptr);
37941e78bcc1SAlexander Graf             break;
37951e78bcc1SAlexander Graf         default:
379684b7b8e7Sbellard             val = ldq_p(ptr);
37971e78bcc1SAlexander Graf             break;
37981e78bcc1SAlexander Graf         }
379984b7b8e7Sbellard     }
380084b7b8e7Sbellard     return val;
380184b7b8e7Sbellard }
380284b7b8e7Sbellard 
38031e78bcc1SAlexander Graf uint64_t ldq_phys(target_phys_addr_t addr)
38041e78bcc1SAlexander Graf {
38051e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
38061e78bcc1SAlexander Graf }
38071e78bcc1SAlexander Graf 
38081e78bcc1SAlexander Graf uint64_t ldq_le_phys(target_phys_addr_t addr)
38091e78bcc1SAlexander Graf {
38101e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
38111e78bcc1SAlexander Graf }
38121e78bcc1SAlexander Graf 
38131e78bcc1SAlexander Graf uint64_t ldq_be_phys(target_phys_addr_t addr)
38141e78bcc1SAlexander Graf {
38151e78bcc1SAlexander Graf     return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
38161e78bcc1SAlexander Graf }
38171e78bcc1SAlexander Graf 
3818aab33094Sbellard /* XXX: optimize */
3819c227f099SAnthony Liguori uint32_t ldub_phys(target_phys_addr_t addr)
3820aab33094Sbellard {
3821aab33094Sbellard     uint8_t val;
3822aab33094Sbellard     cpu_physical_memory_read(addr, &val, 1);
3823aab33094Sbellard     return val;
3824aab33094Sbellard }
3825aab33094Sbellard 
3826733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
38271e78bcc1SAlexander Graf static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
38281e78bcc1SAlexander Graf                                           enum device_endian endian)
3829aab33094Sbellard {
3830733f0b02SMichael S. Tsirkin     uint8_t *ptr;
3831733f0b02SMichael S. Tsirkin     uint64_t val;
3832f3705d53SAvi Kivity     MemoryRegionSection *section;
3833733f0b02SMichael S. Tsirkin 
383406ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
3835733f0b02SMichael S. Tsirkin 
3836cc5bea60SBlue Swirl     if (!(memory_region_is_ram(section->mr) ||
3837cc5bea60SBlue Swirl           memory_region_is_romd(section->mr))) {
3838733f0b02SMichael S. Tsirkin         /* I/O case */
3839cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
384037ec01d4SAvi Kivity         val = io_mem_read(section->mr, addr, 2);
38411e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
38421e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
38431e78bcc1SAlexander Graf             val = bswap16(val);
38441e78bcc1SAlexander Graf         }
38451e78bcc1SAlexander Graf #else
38461e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
38471e78bcc1SAlexander Graf             val = bswap16(val);
38481e78bcc1SAlexander Graf         }
38491e78bcc1SAlexander Graf #endif
3850733f0b02SMichael S. Tsirkin     } else {
3851733f0b02SMichael S. Tsirkin         /* RAM case */
3852f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
385306ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3854cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
38551e78bcc1SAlexander Graf         switch (endian) {
38561e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
38571e78bcc1SAlexander Graf             val = lduw_le_p(ptr);
38581e78bcc1SAlexander Graf             break;
38591e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
38601e78bcc1SAlexander Graf             val = lduw_be_p(ptr);
38611e78bcc1SAlexander Graf             break;
38621e78bcc1SAlexander Graf         default:
3863733f0b02SMichael S. Tsirkin             val = lduw_p(ptr);
38641e78bcc1SAlexander Graf             break;
38651e78bcc1SAlexander Graf         }
3866733f0b02SMichael S. Tsirkin     }
3867733f0b02SMichael S. Tsirkin     return val;
3868aab33094Sbellard }
3869aab33094Sbellard 
38701e78bcc1SAlexander Graf uint32_t lduw_phys(target_phys_addr_t addr)
38711e78bcc1SAlexander Graf {
38721e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
38731e78bcc1SAlexander Graf }
38741e78bcc1SAlexander Graf 
38751e78bcc1SAlexander Graf uint32_t lduw_le_phys(target_phys_addr_t addr)
38761e78bcc1SAlexander Graf {
38771e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
38781e78bcc1SAlexander Graf }
38791e78bcc1SAlexander Graf 
38801e78bcc1SAlexander Graf uint32_t lduw_be_phys(target_phys_addr_t addr)
38811e78bcc1SAlexander Graf {
38821e78bcc1SAlexander Graf     return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
38831e78bcc1SAlexander Graf }
38841e78bcc1SAlexander Graf 
38858df1cd07Sbellard /* warning: addr must be aligned. The ram page is not masked as dirty
38868df1cd07Sbellard    and the code inside is not invalidated. It is useful if the dirty
38878df1cd07Sbellard    bits are used to track modified PTEs */
3888c227f099SAnthony Liguori void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
38898df1cd07Sbellard {
38908df1cd07Sbellard     uint8_t *ptr;
3891f3705d53SAvi Kivity     MemoryRegionSection *section;
38928df1cd07Sbellard 
389306ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
38948df1cd07Sbellard 
3895f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3896cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
389737ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
389837ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
389937ec01d4SAvi Kivity         }
390037ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 4);
39018df1cd07Sbellard     } else {
3902f3705d53SAvi Kivity         unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
390306ef3525SAvi Kivity                                & TARGET_PAGE_MASK)
3904cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
39055579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
39068df1cd07Sbellard         stl_p(ptr, val);
390774576198Saliguori 
390874576198Saliguori         if (unlikely(in_migration)) {
390974576198Saliguori             if (!cpu_physical_memory_is_dirty(addr1)) {
391074576198Saliguori                 /* invalidate code */
391174576198Saliguori                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
391274576198Saliguori                 /* set dirty bit */
3913f7c11b53SYoshiaki Tamura                 cpu_physical_memory_set_dirty_flags(
3914f7c11b53SYoshiaki Tamura                     addr1, (0xff & ~CODE_DIRTY_FLAG));
391574576198Saliguori             }
391674576198Saliguori         }
39178df1cd07Sbellard     }
39188df1cd07Sbellard }
39198df1cd07Sbellard 
3920c227f099SAnthony Liguori void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3921bc98a7efSj_mayer {
3922bc98a7efSj_mayer     uint8_t *ptr;
3923f3705d53SAvi Kivity     MemoryRegionSection *section;
3924bc98a7efSj_mayer 
392506ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
3926bc98a7efSj_mayer 
3927f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3928cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
392937ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
393037ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
393137ec01d4SAvi Kivity         }
3932bc98a7efSj_mayer #ifdef TARGET_WORDS_BIGENDIAN
393337ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val >> 32, 4);
393437ec01d4SAvi Kivity         io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
3935bc98a7efSj_mayer #else
393637ec01d4SAvi Kivity         io_mem_write(section->mr, addr, (uint32_t)val, 4);
393737ec01d4SAvi Kivity         io_mem_write(section->mr, addr + 4, val >> 32, 4);
3938bc98a7efSj_mayer #endif
3939bc98a7efSj_mayer     } else {
3940f3705d53SAvi Kivity         ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
394106ef3525SAvi Kivity                                 & TARGET_PAGE_MASK)
3942cc5bea60SBlue Swirl                                + memory_region_section_addr(section, addr));
3943bc98a7efSj_mayer         stq_p(ptr, val);
3944bc98a7efSj_mayer     }
3945bc98a7efSj_mayer }
3946bc98a7efSj_mayer 
39478df1cd07Sbellard /* warning: addr must be aligned */
39481e78bcc1SAlexander Graf static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
39491e78bcc1SAlexander Graf                                      enum device_endian endian)
39508df1cd07Sbellard {
39518df1cd07Sbellard     uint8_t *ptr;
3952f3705d53SAvi Kivity     MemoryRegionSection *section;
39538df1cd07Sbellard 
395406ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
39558df1cd07Sbellard 
3956f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
3957cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
395837ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
395937ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
396037ec01d4SAvi Kivity         }
39611e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
39621e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
39631e78bcc1SAlexander Graf             val = bswap32(val);
39641e78bcc1SAlexander Graf         }
39651e78bcc1SAlexander Graf #else
39661e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
39671e78bcc1SAlexander Graf             val = bswap32(val);
39681e78bcc1SAlexander Graf         }
39691e78bcc1SAlexander Graf #endif
397037ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 4);
39718df1cd07Sbellard     } else {
39728df1cd07Sbellard         unsigned long addr1;
3973f3705d53SAvi Kivity         addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
3974cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
39758df1cd07Sbellard         /* RAM case */
39765579c7f3Spbrook         ptr = qemu_get_ram_ptr(addr1);
39771e78bcc1SAlexander Graf         switch (endian) {
39781e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
39791e78bcc1SAlexander Graf             stl_le_p(ptr, val);
39801e78bcc1SAlexander Graf             break;
39811e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
39821e78bcc1SAlexander Graf             stl_be_p(ptr, val);
39831e78bcc1SAlexander Graf             break;
39841e78bcc1SAlexander Graf         default:
39858df1cd07Sbellard             stl_p(ptr, val);
39861e78bcc1SAlexander Graf             break;
39871e78bcc1SAlexander Graf         }
398851d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 4);
39898df1cd07Sbellard     }
39903a7d929eSbellard }
39918df1cd07Sbellard 
39921e78bcc1SAlexander Graf void stl_phys(target_phys_addr_t addr, uint32_t val)
39931e78bcc1SAlexander Graf {
39941e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
39951e78bcc1SAlexander Graf }
39961e78bcc1SAlexander Graf 
39971e78bcc1SAlexander Graf void stl_le_phys(target_phys_addr_t addr, uint32_t val)
39981e78bcc1SAlexander Graf {
39991e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
40001e78bcc1SAlexander Graf }
40011e78bcc1SAlexander Graf 
40021e78bcc1SAlexander Graf void stl_be_phys(target_phys_addr_t addr, uint32_t val)
40031e78bcc1SAlexander Graf {
40041e78bcc1SAlexander Graf     stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
40051e78bcc1SAlexander Graf }
40061e78bcc1SAlexander Graf 
4007aab33094Sbellard /* XXX: optimize */
4008c227f099SAnthony Liguori void stb_phys(target_phys_addr_t addr, uint32_t val)
4009aab33094Sbellard {
4010aab33094Sbellard     uint8_t v = val;
4011aab33094Sbellard     cpu_physical_memory_write(addr, &v, 1);
4012aab33094Sbellard }
4013aab33094Sbellard 
4014733f0b02SMichael S. Tsirkin /* warning: addr must be aligned */
40151e78bcc1SAlexander Graf static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
40161e78bcc1SAlexander Graf                                      enum device_endian endian)
4017aab33094Sbellard {
4018733f0b02SMichael S. Tsirkin     uint8_t *ptr;
4019f3705d53SAvi Kivity     MemoryRegionSection *section;
4020733f0b02SMichael S. Tsirkin 
402106ef3525SAvi Kivity     section = phys_page_find(addr >> TARGET_PAGE_BITS);
4022733f0b02SMichael S. Tsirkin 
4023f3705d53SAvi Kivity     if (!memory_region_is_ram(section->mr) || section->readonly) {
4024cc5bea60SBlue Swirl         addr = memory_region_section_addr(section, addr);
402537ec01d4SAvi Kivity         if (memory_region_is_ram(section->mr)) {
402637ec01d4SAvi Kivity             section = &phys_sections[phys_section_rom];
402737ec01d4SAvi Kivity         }
40281e78bcc1SAlexander Graf #if defined(TARGET_WORDS_BIGENDIAN)
40291e78bcc1SAlexander Graf         if (endian == DEVICE_LITTLE_ENDIAN) {
40301e78bcc1SAlexander Graf             val = bswap16(val);
40311e78bcc1SAlexander Graf         }
40321e78bcc1SAlexander Graf #else
40331e78bcc1SAlexander Graf         if (endian == DEVICE_BIG_ENDIAN) {
40341e78bcc1SAlexander Graf             val = bswap16(val);
40351e78bcc1SAlexander Graf         }
40361e78bcc1SAlexander Graf #endif
403737ec01d4SAvi Kivity         io_mem_write(section->mr, addr, val, 2);
4038733f0b02SMichael S. Tsirkin     } else {
4039733f0b02SMichael S. Tsirkin         unsigned long addr1;
4040f3705d53SAvi Kivity         addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4041cc5bea60SBlue Swirl             + memory_region_section_addr(section, addr);
4042733f0b02SMichael S. Tsirkin         /* RAM case */
4043733f0b02SMichael S. Tsirkin         ptr = qemu_get_ram_ptr(addr1);
40441e78bcc1SAlexander Graf         switch (endian) {
40451e78bcc1SAlexander Graf         case DEVICE_LITTLE_ENDIAN:
40461e78bcc1SAlexander Graf             stw_le_p(ptr, val);
40471e78bcc1SAlexander Graf             break;
40481e78bcc1SAlexander Graf         case DEVICE_BIG_ENDIAN:
40491e78bcc1SAlexander Graf             stw_be_p(ptr, val);
40501e78bcc1SAlexander Graf             break;
40511e78bcc1SAlexander Graf         default:
4052733f0b02SMichael S. Tsirkin             stw_p(ptr, val);
40531e78bcc1SAlexander Graf             break;
40541e78bcc1SAlexander Graf         }
405551d7a9ebSAnthony PERARD         invalidate_and_set_dirty(addr1, 2);
4056733f0b02SMichael S. Tsirkin     }
4057aab33094Sbellard }
4058aab33094Sbellard 
40591e78bcc1SAlexander Graf void stw_phys(target_phys_addr_t addr, uint32_t val)
40601e78bcc1SAlexander Graf {
40611e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
40621e78bcc1SAlexander Graf }
40631e78bcc1SAlexander Graf 
40641e78bcc1SAlexander Graf void stw_le_phys(target_phys_addr_t addr, uint32_t val)
40651e78bcc1SAlexander Graf {
40661e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
40671e78bcc1SAlexander Graf }
40681e78bcc1SAlexander Graf 
40691e78bcc1SAlexander Graf void stw_be_phys(target_phys_addr_t addr, uint32_t val)
40701e78bcc1SAlexander Graf {
40711e78bcc1SAlexander Graf     stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
40721e78bcc1SAlexander Graf }
40731e78bcc1SAlexander Graf 
4074aab33094Sbellard /* XXX: optimize */
4075c227f099SAnthony Liguori void stq_phys(target_phys_addr_t addr, uint64_t val)
4076aab33094Sbellard {
4077aab33094Sbellard     val = tswap64(val);
407871d2b725SStefan Weil     cpu_physical_memory_write(addr, &val, 8);
4079aab33094Sbellard }
4080aab33094Sbellard 
40811e78bcc1SAlexander Graf void stq_le_phys(target_phys_addr_t addr, uint64_t val)
40821e78bcc1SAlexander Graf {
40831e78bcc1SAlexander Graf     val = cpu_to_le64(val);
40841e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
40851e78bcc1SAlexander Graf }
40861e78bcc1SAlexander Graf 
40871e78bcc1SAlexander Graf void stq_be_phys(target_phys_addr_t addr, uint64_t val)
40881e78bcc1SAlexander Graf {
40891e78bcc1SAlexander Graf     val = cpu_to_be64(val);
40901e78bcc1SAlexander Graf     cpu_physical_memory_write(addr, &val, 8);
40911e78bcc1SAlexander Graf }
40921e78bcc1SAlexander Graf 
40935e2972fdSaliguori /* virtual memory access for debug (includes writing to ROM) */
40949349b4f9SAndreas Färber int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
4095b448f2f3Sbellard                         uint8_t *buf, int len, int is_write)
409613eb76e0Sbellard {
409713eb76e0Sbellard     int l;
4098c227f099SAnthony Liguori     target_phys_addr_t phys_addr;
40999b3c35e0Sj_mayer     target_ulong page;
410013eb76e0Sbellard 
410113eb76e0Sbellard     while (len > 0) {
410213eb76e0Sbellard         page = addr & TARGET_PAGE_MASK;
410313eb76e0Sbellard         phys_addr = cpu_get_phys_page_debug(env, page);
410413eb76e0Sbellard         /* if no physical page mapped, return an error */
410513eb76e0Sbellard         if (phys_addr == -1)
410613eb76e0Sbellard             return -1;
410713eb76e0Sbellard         l = (page + TARGET_PAGE_SIZE) - addr;
410813eb76e0Sbellard         if (l > len)
410913eb76e0Sbellard             l = len;
41105e2972fdSaliguori         phys_addr += (addr & ~TARGET_PAGE_MASK);
41115e2972fdSaliguori         if (is_write)
41125e2972fdSaliguori             cpu_physical_memory_write_rom(phys_addr, buf, l);
41135e2972fdSaliguori         else
41145e2972fdSaliguori             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
411513eb76e0Sbellard         len -= l;
411613eb76e0Sbellard         buf += l;
411713eb76e0Sbellard         addr += l;
411813eb76e0Sbellard     }
411913eb76e0Sbellard     return 0;
412013eb76e0Sbellard }
4121a68fe89cSPaul Brook #endif
412213eb76e0Sbellard 
41232e70f6efSpbrook /* in deterministic execution mode, instructions doing device I/Os
41242e70f6efSpbrook    must be at the end of the TB */
412520503968SBlue Swirl void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
41262e70f6efSpbrook {
41272e70f6efSpbrook     TranslationBlock *tb;
41282e70f6efSpbrook     uint32_t n, cflags;
41292e70f6efSpbrook     target_ulong pc, cs_base;
41302e70f6efSpbrook     uint64_t flags;
41312e70f6efSpbrook 
413220503968SBlue Swirl     tb = tb_find_pc(retaddr);
41332e70f6efSpbrook     if (!tb) {
41342e70f6efSpbrook         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
413520503968SBlue Swirl                   (void *)retaddr);
41362e70f6efSpbrook     }
41372e70f6efSpbrook     n = env->icount_decr.u16.low + tb->icount;
413820503968SBlue Swirl     cpu_restore_state(tb, env, retaddr);
41392e70f6efSpbrook     /* Calculate how many instructions had been executed before the fault
4140bf20dc07Sths        occurred.  */
41412e70f6efSpbrook     n = n - env->icount_decr.u16.low;
41422e70f6efSpbrook     /* Generate a new TB ending on the I/O insn.  */
41432e70f6efSpbrook     n++;
41442e70f6efSpbrook     /* On MIPS and SH, delay slot instructions can only be restarted if
41452e70f6efSpbrook        they were already the first instruction in the TB.  If this is not
4146bf20dc07Sths        the first instruction in a TB then re-execute the preceding
41472e70f6efSpbrook        branch.  */
41482e70f6efSpbrook #if defined(TARGET_MIPS)
41492e70f6efSpbrook     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
41502e70f6efSpbrook         env->active_tc.PC -= 4;
41512e70f6efSpbrook         env->icount_decr.u16.low++;
41522e70f6efSpbrook         env->hflags &= ~MIPS_HFLAG_BMASK;
41532e70f6efSpbrook     }
41542e70f6efSpbrook #elif defined(TARGET_SH4)
41552e70f6efSpbrook     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
41562e70f6efSpbrook             && n > 1) {
41572e70f6efSpbrook         env->pc -= 2;
41582e70f6efSpbrook         env->icount_decr.u16.low++;
41592e70f6efSpbrook         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
41602e70f6efSpbrook     }
41612e70f6efSpbrook #endif
41622e70f6efSpbrook     /* This should never happen.  */
41632e70f6efSpbrook     if (n > CF_COUNT_MASK)
41642e70f6efSpbrook         cpu_abort(env, "TB too big during recompile");
41652e70f6efSpbrook 
41662e70f6efSpbrook     cflags = n | CF_LAST_IO;
41672e70f6efSpbrook     pc = tb->pc;
41682e70f6efSpbrook     cs_base = tb->cs_base;
41692e70f6efSpbrook     flags = tb->flags;
41702e70f6efSpbrook     tb_phys_invalidate(tb, -1);
41712e70f6efSpbrook     /* FIXME: In theory this could raise an exception.  In practice
41722e70f6efSpbrook        we have already translated the block once so it's probably ok.  */
41732e70f6efSpbrook     tb_gen_code(env, pc, cs_base, flags, cflags);
4174bf20dc07Sths     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
41752e70f6efSpbrook        the first in the TB) then we end up generating a whole new TB and
41762e70f6efSpbrook        repeating the fault, which is horribly inefficient.
41772e70f6efSpbrook        Better would be to execute just this insn uncached, or generate a
41782e70f6efSpbrook        second new TB.  */
41792e70f6efSpbrook     cpu_resume_from_signal(env, NULL);
41802e70f6efSpbrook }
41812e70f6efSpbrook 
4182b3755a91SPaul Brook #if !defined(CONFIG_USER_ONLY)
4183b3755a91SPaul Brook 
4184055403b2SStefan Weil void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
4185e3db7226Sbellard {
4186e3db7226Sbellard     int i, target_code_size, max_target_code_size;
4187e3db7226Sbellard     int direct_jmp_count, direct_jmp2_count, cross_page;
4188e3db7226Sbellard     TranslationBlock *tb;
4189e3db7226Sbellard 
4190e3db7226Sbellard     target_code_size = 0;
4191e3db7226Sbellard     max_target_code_size = 0;
4192e3db7226Sbellard     cross_page = 0;
4193e3db7226Sbellard     direct_jmp_count = 0;
4194e3db7226Sbellard     direct_jmp2_count = 0;
4195e3db7226Sbellard     for(i = 0; i < nb_tbs; i++) {
4196e3db7226Sbellard         tb = &tbs[i];
4197e3db7226Sbellard         target_code_size += tb->size;
4198e3db7226Sbellard         if (tb->size > max_target_code_size)
4199e3db7226Sbellard             max_target_code_size = tb->size;
4200e3db7226Sbellard         if (tb->page_addr[1] != -1)
4201e3db7226Sbellard             cross_page++;
4202e3db7226Sbellard         if (tb->tb_next_offset[0] != 0xffff) {
4203e3db7226Sbellard             direct_jmp_count++;
4204e3db7226Sbellard             if (tb->tb_next_offset[1] != 0xffff) {
4205e3db7226Sbellard                 direct_jmp2_count++;
4206e3db7226Sbellard             }
4207e3db7226Sbellard         }
4208e3db7226Sbellard     }
4209e3db7226Sbellard     /* XXX: avoid using doubles ? */
421057fec1feSbellard     cpu_fprintf(f, "Translation buffer state:\n");
4211f1bc0bccSRichard Henderson     cpu_fprintf(f, "gen code size       %td/%zd\n",
421226a5f13bSbellard                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
421326a5f13bSbellard     cpu_fprintf(f, "TB count            %d/%d\n",
421426a5f13bSbellard                 nb_tbs, code_gen_max_blocks);
4215e3db7226Sbellard     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
4216e3db7226Sbellard                 nb_tbs ? target_code_size / nb_tbs : 0,
4217e3db7226Sbellard                 max_target_code_size);
4218055403b2SStefan Weil     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
4219e3db7226Sbellard                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4220e3db7226Sbellard                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4221e3db7226Sbellard     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4222e3db7226Sbellard             cross_page,
4223e3db7226Sbellard             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4224e3db7226Sbellard     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
4225e3db7226Sbellard                 direct_jmp_count,
4226e3db7226Sbellard                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4227e3db7226Sbellard                 direct_jmp2_count,
4228e3db7226Sbellard                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
422957fec1feSbellard     cpu_fprintf(f, "\nStatistics:\n");
4230e3db7226Sbellard     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
4231e3db7226Sbellard     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4232e3db7226Sbellard     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
4233b67d9a52Sbellard     tcg_dump_info(f, cpu_fprintf);
4234e3db7226Sbellard }
4235e3db7226Sbellard 
423682afa586SBenjamin Herrenschmidt /*
423782afa586SBenjamin Herrenschmidt  * A helper function for the _utterly broken_ virtio device model to find out if
423882afa586SBenjamin Herrenschmidt  * it's running on a big endian machine. Don't do this at home kids!
423982afa586SBenjamin Herrenschmidt  */
424082afa586SBenjamin Herrenschmidt bool virtio_is_big_endian(void);
424182afa586SBenjamin Herrenschmidt bool virtio_is_big_endian(void)
424282afa586SBenjamin Herrenschmidt {
424382afa586SBenjamin Herrenschmidt #if defined(TARGET_WORDS_BIGENDIAN)
424482afa586SBenjamin Herrenschmidt     return true;
424582afa586SBenjamin Herrenschmidt #else
424682afa586SBenjamin Herrenschmidt     return false;
424782afa586SBenjamin Herrenschmidt #endif
424882afa586SBenjamin Herrenschmidt }
424982afa586SBenjamin Herrenschmidt 
425061382a50Sbellard #endif
425176f35538SWen Congyang 
425276f35538SWen Congyang #ifndef CONFIG_USER_ONLY
425376f35538SWen Congyang bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
425476f35538SWen Congyang {
425576f35538SWen Congyang     MemoryRegionSection *section;
425676f35538SWen Congyang 
425776f35538SWen Congyang     section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
425876f35538SWen Congyang 
425976f35538SWen Congyang     return !(memory_region_is_ram(section->mr) ||
426076f35538SWen Congyang              memory_region_is_romd(section->mr));
426176f35538SWen Congyang }
426276f35538SWen Congyang #endif
4263